diff --git a/spp_aggregation/security/ir.model.access.csv b/spp_aggregation/security/ir.model.access.csv deleted file mode 100644 index 2917fd14..00000000 --- a/spp_aggregation/security/ir.model.access.csv +++ /dev/null @@ -1,7 +0,0 @@ -id,name,model_id:id,group_id:id,perm_read,perm_write,perm_create,perm_unlink -access_spp_aggregation_scope_read,spp.aggregation.scope.read,model_spp_aggregation_scope,group_aggregation_read,1,0,0,0 -access_spp_aggregation_scope_write,spp.aggregation.scope.write,model_spp_aggregation_scope,group_aggregation_write,1,1,1,1 -access_spp_aggregation_cache_entry_read,spp.aggregation.cache.entry.read,model_spp_aggregation_cache_entry,group_aggregation_read,1,0,0,0 -access_spp_aggregation_cache_entry_write,spp.aggregation.cache.entry.write,model_spp_aggregation_cache_entry,group_aggregation_write,1,1,1,1 -access_spp_aggregation_access_rule_read,spp.aggregation.access.rule.read,model_spp_aggregation_access_rule,group_aggregation_read,1,0,0,0 -access_spp_aggregation_access_rule_write,spp.aggregation.access.rule.write,model_spp_aggregation_access_rule,group_aggregation_manager,1,1,1,1 diff --git a/spp_aggregation/static/description/index.html b/spp_aggregation/static/description/index.html deleted file mode 100644 index 82ab927b..00000000 --- a/spp_aggregation/static/description/index.html +++ /dev/null @@ -1 +0,0 @@ -

spp_aggregation

diff --git a/spp_aggregation/README.rst b/spp_analytics/README.rst similarity index 94% rename from spp_aggregation/README.rst rename to spp_analytics/README.rst index 8648e962..4f09de58 100644 --- a/spp_aggregation/README.rst +++ b/spp_analytics/README.rst @@ -1,6 +1,6 @@ -========================== -OpenSPP Aggregation Engine -========================== +================= +OpenSPP Analytics +================= .. !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! @@ -17,7 +17,7 @@ OpenSPP Aggregation Engine :target: http://www.gnu.org/licenses/lgpl-3.0-standalone.html :alt: License: LGPL-3 .. |badge3| image:: https://img.shields.io/badge/github-OpenSPP%2FOpenSPP2-lightgray.png?logo=github - :target: https://github.com/OpenSPP/OpenSPP2/tree/19.0/spp_aggregation + :target: https://github.com/OpenSPP/OpenSPP2/tree/19.0/spp_analytics :alt: OpenSPP/OpenSPP2 |badge1| |badge2| |badge3| @@ -121,7 +121,7 @@ Bug Tracker Bugs are tracked on `GitHub Issues `_. In case of trouble, please check there if your issue has already been reported. If you spotted it first, help us to smash it by providing a detailed and welcomed -`feedback `_. +`feedback `_. Do not contact contributors directly about support or help with technical issues. @@ -144,6 +144,6 @@ Current maintainer: |maintainer-jeremi| -This module is part of the `OpenSPP/OpenSPP2 `_ project on GitHub. +This module is part of the `OpenSPP/OpenSPP2 `_ project on GitHub. You are welcome to contribute. \ No newline at end of file diff --git a/spp_aggregation/__init__.py b/spp_analytics/__init__.py similarity index 100% rename from spp_aggregation/__init__.py rename to spp_analytics/__init__.py diff --git a/spp_aggregation/__manifest__.py b/spp_analytics/__manifest__.py similarity index 76% rename from spp_aggregation/__manifest__.py rename to spp_analytics/__manifest__.py index 582153e2..f3a523a8 100644 --- a/spp_aggregation/__manifest__.py +++ b/spp_analytics/__manifest__.py @@ -1,7 +1,7 @@ # Part of OpenSPP. 
See LICENSE file for full copyright and licensing details. { - "name": "OpenSPP Aggregation Engine", - "summary": "Unified aggregation service for statistics, simulations, and GIS queries", + "name": "OpenSPP Analytics", + "summary": "Query engine for indicators, simulations, and GIS analytics", "category": "OpenSPP", "version": "19.0.2.0.0", "sequence": 1, @@ -16,7 +16,7 @@ "spp_area", "spp_registry", "spp_security", - "spp_metrics_services", + "spp_metric_service", ], "data": [ # Security @@ -25,8 +25,8 @@ # Data "data/cron_cache_cleanup.xml", # Views - "views/aggregation_scope_views.xml", - "views/aggregation_access_views.xml", + "views/analytics_scope_views.xml", + "views/analytics_access_views.xml", "views/menu.xml", ], "assets": {}, diff --git a/spp_aggregation/data/cron_cache_cleanup.xml b/spp_analytics/data/cron_cache_cleanup.xml similarity index 76% rename from spp_aggregation/data/cron_cache_cleanup.xml rename to spp_analytics/data/cron_cache_cleanup.xml index e4a362bc..6710e29e 100644 --- a/spp_aggregation/data/cron_cache_cleanup.xml +++ b/spp_analytics/data/cron_cache_cleanup.xml @@ -2,8 +2,8 @@ - Aggregation: Cache Cleanup - + Analytics: Cache Cleanup + code model.cron_cleanup_expired() 1 diff --git a/spp_aggregation/models/__init__.py b/spp_analytics/models/__init__.py similarity index 64% rename from spp_aggregation/models/__init__.py rename to spp_analytics/models/__init__.py index 24cb09c3..9e56c1ee 100644 --- a/spp_aggregation/models/__init__.py +++ b/spp_analytics/models/__init__.py @@ -1,8 +1,8 @@ # Part of OpenSPP. See LICENSE file for full copyright and licensing details. -from . import aggregation_scope -from . import aggregation_access +from . import analytics_scope +from . import analytics_access from . import service_scope_resolver from . import service_cache -from . import statistic_registry +from . import indicator_registry from . 
import service_aggregation diff --git a/spp_aggregation/models/aggregation_access.py b/spp_analytics/models/analytics_access.py similarity index 97% rename from spp_aggregation/models/aggregation_access.py rename to spp_analytics/models/analytics_access.py index 6514bce2..4121f861 100644 --- a/spp_aggregation/models/aggregation_access.py +++ b/spp_analytics/models/analytics_access.py @@ -7,7 +7,7 @@ _logger = logging.getLogger(__name__) -class AggregationAccessRule(models.Model): +class AnalyticsAccessRule(models.Model): """ Access control rules for aggregation queries. @@ -18,7 +18,7 @@ class AggregationAccessRule(models.Model): Also controls k-anonymity thresholds and scope restrictions. """ - _name = "spp.aggregation.access.rule" + _name = "spp.analytics.access.rule" _description = "Aggregation Access Rule" _order = "sequence, name" @@ -101,7 +101,7 @@ class AggregationAccessRule(models.Model): ), ) allowed_scope_ids = fields.Many2many( - comodel_name="spp.aggregation.scope", + comodel_name="spp.analytics.scope", relation="spp_aggregation_access_rule_scope_rel", column1="rule_id", column2="scope_id", @@ -184,7 +184,7 @@ def get_effective_rule_for_user(self, user=None): :param user: res.users record (defaults to current user) :returns: Access rule record or None if no rule matches - :rtype: spp.aggregation.access.rule or None + :rtype: spp.analytics.access.rule or None """ user = user or self.env.user @@ -209,7 +209,7 @@ def check_scope_allowed(self, scope): """ Check if a scope is allowed under this rule. 
- :param scope: spp.aggregation.scope record or dict for inline scope + :param scope: spp.analytics.scope record or dict for inline scope :returns: True if allowed :raises: ValidationError if not allowed """ diff --git a/spp_aggregation/models/aggregation_scope.py b/spp_analytics/models/analytics_scope.py similarity index 97% rename from spp_aggregation/models/aggregation_scope.py rename to spp_analytics/models/analytics_scope.py index 00ea6fbd..8fbd0bbb 100644 --- a/spp_aggregation/models/aggregation_scope.py +++ b/spp_analytics/models/analytics_scope.py @@ -8,7 +8,7 @@ _logger = logging.getLogger(__name__) -class AggregationScope(models.Model): +class AnalyticsScope(models.Model): """ Unified targeting scope for aggregation queries. @@ -21,7 +21,7 @@ class AggregationScope(models.Model): - Explicit ID lists """ - _name = "spp.aggregation.scope" + _name = "spp.analytics.scope" _description = "Aggregation Scope" _order = "name" @@ -170,7 +170,7 @@ def _compute_registrant_count(self): else: # For other types, resolve and count try: - ids = self.env["spp.aggregation.scope.resolver"].resolve(scope) + ids = self.env["spp.analytics.scope.resolver"].resolve(scope) scope.registrant_count = len(ids) except (ValidationError, UserError) as e: _logger.debug("Could not compute registrant count for scope %s: %s", scope.id, e) @@ -253,7 +253,7 @@ def resolve_registrant_ids(self): :rtype: list[int] """ self.ensure_one() - return self.env["spp.aggregation.scope.resolver"].resolve(self) + return self.env["spp.analytics.scope.resolver"].resolve(self) def action_preview_registrants(self): """Action to preview registrants in this scope.""" @@ -276,7 +276,7 @@ def action_refresh_cache(self): updates the last_cache_refresh timestamp. 
""" self.ensure_one() - cache_service = self.env["spp.aggregation.cache"] + cache_service = self.env["spp.analytics.cache"] count = cache_service.invalidate_scope(self) if count: scope_name = self.name diff --git a/spp_aggregation/models/statistic_registry.py b/spp_analytics/models/indicator_registry.py similarity index 95% rename from spp_aggregation/models/statistic_registry.py rename to spp_analytics/models/indicator_registry.py index 7c12b9c5..ed4c773a 100644 --- a/spp_aggregation/models/statistic_registry.py +++ b/spp_analytics/models/indicator_registry.py @@ -6,7 +6,7 @@ _logger = logging.getLogger(__name__) -class StatisticRegistry(models.AbstractModel): +class IndicatorRegistry(models.AbstractModel): """Registry that maps statistic names to computation strategies. Replaces the fallback chain in compute_single_statistic with @@ -14,7 +14,7 @@ class StatisticRegistry(models.AbstractModel): how it should be computed. """ - _name = "spp.aggregation.statistic.registry" + _name = "spp.analytics.indicator.registry" _description = "Statistic Computation Registry" @api.model @@ -23,7 +23,7 @@ def compute(self, stat_name, registrant_ids, context=None): Lookup order: 1. Built-in statistics (count, gini) - 2. spp.statistic records (via CEL variable) + 2. spp.indicator records (via CEL variable) 3. 
spp.cel.variable records (direct) :param stat_name: Statistic name @@ -36,7 +36,7 @@ def compute(self, stat_name, registrant_ids, context=None): if builtin_method is not None: return builtin_method(registrant_ids) - # Try spp.statistic (if module installed) + # Try spp.indicator (if module installed) value = self._try_statistic_model(stat_name, registrant_ids) if value is not None: return value @@ -49,18 +49,18 @@ def compute(self, stat_name, registrant_ids, context=None): # Provide diagnostic information if debug logging is enabled if _logger.isEnabledFor(logging.DEBUG): # Check if models exist - has_stat_model = self.env.get("spp.statistic") is not None + has_stat_model = self.env.get("spp.indicator") is not None has_var_model = self.env.get("spp.cel.variable") is not None stat_count = 0 var_count = 0 if has_stat_model: - stat_count = self.env["spp.statistic"].sudo().search_count([]) # nosemgrep: odoo-sudo-without-context + stat_count = self.env["spp.indicator"].sudo().search_count([]) # nosemgrep: odoo-sudo-without-context if has_var_model: var_count = self.env["spp.cel.variable"].sudo().search_count([]) # nosemgrep: odoo-sudo-without-context _logger.debug( - "Statistic lookup failed for '%s'. Available: %d spp.statistic, %d spp.cel.variable", + "Statistic lookup failed for '%s'. 
Available: %d spp.indicator, %d spp.cel.variable", stat_name, stat_count, var_count, @@ -82,8 +82,8 @@ def list_available(self): for name, info in self._BUILTINS.items(): available.append({"name": name, "label": info["label"], "source": "builtin"}) - # From spp.statistic - stat_model = self.env.get("spp.statistic") + # From spp.indicator + stat_model = self.env.get("spp.indicator") if stat_model: for stat in stat_model.sudo().search([("active", "=", True)]): # nosemgrep: odoo-sudo-without-context available.append({"name": stat.name, "label": stat.label, "source": "statistic"}) @@ -145,13 +145,13 @@ def _compute_gini(self, registrant_ids): @api.model def _try_statistic_model(self, stat_name, registrant_ids): - """Try computing via spp.statistic record. + """Try computing via spp.indicator record. :param stat_name: Statistic name :param registrant_ids: List of partner IDs :returns: Computed value or None """ - stat_model = self.env.get("spp.statistic") + stat_model = self.env.get("spp.indicator") if stat_model is None: return None stat = stat_model.sudo().search([("name", "=", stat_name)], limit=1) # nosemgrep: odoo-sudo-without-context diff --git a/spp_aggregation/models/service_aggregation.py b/spp_analytics/models/service_aggregation.py similarity index 82% rename from spp_aggregation/models/service_aggregation.py rename to spp_analytics/models/service_aggregation.py index f5c23d98..ec268589 100644 --- a/spp_aggregation/models/service_aggregation.py +++ b/spp_analytics/models/service_aggregation.py @@ -8,7 +8,7 @@ _logger = logging.getLogger(__name__) -class AggregationService(models.AbstractModel): +class AnalyticsService(models.AbstractModel): """ Main aggregation service for unified statistics computation. @@ -19,8 +19,8 @@ class AggregationService(models.AbstractModel): a parameter, to prevent callers bypassing restrictions. 
""" - _name = "spp.aggregation.service" - _description = "Aggregation Service" + _name = "spp.analytics.service" + _description = "Analytics Service" MAX_GROUP_BY_DIMENSIONS = 3 @@ -36,10 +36,10 @@ def compute_aggregation( """ Compute aggregation for a scope with optional breakdown. - Access level is determined from user permissions (AggregationAccessRule), + Access level is determined from user permissions (AnalyticsAccessRule), NOT passed as a parameter. This prevents callers bypassing restrictions. - :param scope: spp.aggregation.scope record, ID, or inline dict definition + :param scope: spp.analytics.scope record, ID, or inline dict definition :param statistics: List of statistic names to compute (or None for defaults) :param group_by: List of dimension names for breakdown (max 3) :param context: Context string for configuration (e.g., "api", "dashboard") @@ -62,19 +62,22 @@ def compute_aggregation( # Resolve scope scope_record = self._resolve_scope(scope) + # Resolve user access context once (single DB lookup) + rule = self._get_effective_rule() + # Validate group_by dimensions group_by = group_by or [] - self._validate_group_by(group_by) + self._validate_group_by(group_by, rule) - # Determine access level from user permissions - access_level = self._determine_access_level() - k_threshold = self._get_k_threshold() + # Determine access level and k-threshold from the resolved rule + access_level = self._access_level_from_rule(rule) + k_threshold = self._k_threshold_from_rule(rule) # Check scope is allowed for user - self._check_scope_allowed(scope) + self._check_scope_allowed(scope, rule) # Check cache if enabled - cache_service = self.env["spp.aggregation.cache"] + cache_service = self.env["spp.analytics.cache"] if use_cache: cached_result = cache_service.get_cached_result(scope_record, statistics, group_by) if cached_result: @@ -109,7 +112,7 @@ def compute_aggregation( result["breakdown"] = self._compute_breakdown(registrant_ids, group_by, statistics, 
context) # Apply privacy protections - privacy_service = self.env["spp.metrics.privacy"] + privacy_service = self.env["spp.metric.privacy"] result = privacy_service.enforce(result, k_threshold, access_level) # Store in cache if enabled @@ -123,7 +126,7 @@ def _resolve_scope(self, scope): Resolve scope input to a scope record. :param scope: Record, ID, or dict - :returns: spp.aggregation.scope record or dict + :returns: spp.analytics.scope record or dict """ if isinstance(scope, dict): # Inline scope definition @@ -131,16 +134,28 @@ def _resolve_scope(self, scope): if isinstance(scope, int): # Scope ID - return self.env["spp.aggregation.scope"].browse(scope) + return self.env["spp.analytics.scope"].browse(scope) # Assume it's already a record return scope - def _validate_group_by(self, group_by): + def _get_effective_rule(self, user=None): + """ + Look up the effective access rule for a user (single DB query). + + :param user: res.users record (defaults to current user) + :returns: access rule record or None + """ + user = user or self.env.user + # Use sudo() to read access rules - this is an internal security check + return self.env["spp.analytics.access.rule"].sudo().get_effective_rule_for_user(user) # nosemgrep: odoo-sudo-without-context # noqa: E501 # fmt: skip + + def _validate_group_by(self, group_by, rule=None): """ Validate group_by dimensions. 
:param group_by: List of dimension names + :param rule: Pre-resolved access rule (or None) :raises: ValidationError if invalid """ if len(group_by) > self.MAX_GROUP_BY_DIMENSIONS: @@ -155,43 +170,42 @@ def _validate_group_by(self, group_by): raise ValidationError(_("Unknown dimension: %s") % dim_name) # Check access rule dimension restrictions - user = self.env.user - # Use sudo() to read access rules - this is an internal security check - rule = self.env["spp.aggregation.access.rule"].sudo().get_effective_rule_for_user(user) # nosemgrep: odoo-sudo-without-context # noqa: E501 # fmt: skip if rule and group_by: rule.check_dimensions_allowed(group_by) - def _determine_access_level(self, user=None): + def _access_level_from_rule(self, rule): """ - Determine access level from user permissions. + Extract access level from a pre-resolved rule. - :param user: res.users record (defaults to current user) + :param rule: access rule record or None :returns: "aggregate" or "individual" :rtype: str """ - return self.env["spp.metrics.privacy"].validate_access_level(user) + if rule: + return rule.access_level + # Default to aggregate-only for safety + return "aggregate" - def _get_k_threshold(self, user=None): + def _k_threshold_from_rule(self, rule): """ - Get k-anonymity threshold for user. + Extract k-anonymity threshold from a pre-resolved rule. - :param user: res.users record (defaults to current user) + :param rule: access rule record or None :returns: k threshold value :rtype: int """ - return self.env["spp.metrics.privacy"].get_k_threshold(user) + if rule: + return rule.minimum_k_anonymity + return self.env["spp.metric.privacy"].DEFAULT_K_THRESHOLD - def _check_scope_allowed(self, scope): + def _check_scope_allowed(self, scope, rule=None): """ Check if scope is allowed for current user. 
:param scope: Scope record or dict + :param rule: Pre-resolved access rule (or None) :raises: AccessError if not allowed """ - user = self.env.user - # Use sudo() to read access rules - this is an internal security check - rule = self.env["spp.aggregation.access.rule"].sudo().get_effective_rule_for_user(user) # nosemgrep: odoo-sudo-without-context # noqa: E501 # fmt: skip - if not rule: # No explicit rule - allow with defaults return @@ -209,7 +223,7 @@ def _get_registrant_ids(self, scope): :returns: List of partner IDs :rtype: list[int] """ - resolver = self.env["spp.aggregation.scope.resolver"] + resolver = self.env["spp.analytics.scope.resolver"] return resolver.resolve(scope) def _compute_statistics(self, registrant_ids, statistics, context=None, k_threshold=None): @@ -226,14 +240,14 @@ def _compute_statistics(self, registrant_ids, statistics, context=None, k_thresh total_count = len(registrant_ids) statistic_by_name = {} - statistic_model = self.env.get("spp.statistic") + statistic_model = self.env.get("spp.indicator") if statistic_model is not None: statistic_records = statistic_model.sudo().search( # nosemgrep: odoo-sudo-without-context [("name", "in", statistics)] ) statistic_by_name = {record.name: record for record in statistic_records} - privacy_service = self.env["spp.metrics.privacy"] + privacy_service = self.env["spp.metric.privacy"] for stat_name in statistics: try: @@ -305,14 +319,14 @@ def _compute_single_statistic(self, stat_name, registrant_ids, context=None): :param context: Context string :returns: Computed value """ - registry = self.env["spp.aggregation.statistic.registry"] + registry = self.env["spp.analytics.indicator.registry"] return registry.compute(stat_name, registrant_ids, context) def _compute_breakdown(self, registrant_ids, group_by, statistics, context=None): """ Compute breakdown by dimensions. - Delegates to spp.metrics.breakdown service. + Delegates to spp.metric.breakdown service. 
:param registrant_ids: List of partner IDs :param group_by: List of dimension names @@ -321,7 +335,7 @@ def _compute_breakdown(self, registrant_ids, group_by, statistics, context=None) :returns: Breakdown dictionary :rtype: dict """ - breakdown_service = self.env["spp.metrics.breakdown"] + breakdown_service = self.env["spp.metric.breakdown"] return breakdown_service.compute_breakdown(registrant_ids, group_by, statistics, context) # ------------------------------------------------------------------------- @@ -379,7 +393,7 @@ def compute_fairness(self, scope, dimensions=None, **kwargs): # Get base domain for population base_domain = [("is_registrant", "=", True)] - fairness_service = self.env["spp.metrics.fairness"] + fairness_service = self.env["spp.metric.fairness"] return fairness_service.compute_fairness(registrant_ids, base_domain, dimensions) @api.model @@ -391,5 +405,5 @@ def compute_distribution(self, amounts): :returns: Distribution result :rtype: dict """ - distribution_service = self.env["spp.metrics.distribution"] + distribution_service = self.env["spp.metric.distribution"] return distribution_service.compute_distribution(amounts) diff --git a/spp_aggregation/models/service_cache.py b/spp_analytics/models/service_cache.py similarity index 92% rename from spp_aggregation/models/service_cache.py rename to spp_analytics/models/service_cache.py index 8f814899..0140c466 100644 --- a/spp_aggregation/models/service_cache.py +++ b/spp_analytics/models/service_cache.py @@ -9,13 +9,13 @@ _logger = logging.getLogger(__name__) -class AggregationCacheService(models.AbstractModel): +class AnalyticsCacheService(models.AbstractModel): """ Cache service for aggregation results. This service manages caching of aggregation results to improve performance for frequently requested scopes and statistics. Results are stored in - spp.aggregation.cache.entry with TTL-based expiration. + spp.analytics.cache.entry with TTL-based expiration. 
TTL Configuration by Scope Type: - area: 1 hour (3600 seconds) - administrative data is relatively static @@ -26,7 +26,7 @@ class AggregationCacheService(models.AbstractModel): - explicit: 30 minutes (1800 seconds) - explicit lists may change """ - _name = "spp.aggregation.cache" + _name = "spp.analytics.cache" _description = "Aggregation Cache Service" # TTL configuration in seconds @@ -44,7 +44,7 @@ def get_cached_result(self, scope, statistics=None, group_by=None): """ Get cached aggregation result if available and not expired. - :param scope: spp.aggregation.scope record, ID, or dict + :param scope: spp.analytics.scope record, ID, or dict :param statistics: List of statistic names (None for defaults) :param group_by: List of dimension names for breakdown :returns: Cached result dictionary or None if not found/expired @@ -65,7 +65,7 @@ def get_cached_result(self, scope, statistics=None, group_by=None): # Find cache entry (use sudo for internal caching operation) entry = ( - self.env["spp.aggregation.cache.entry"] # nosemgrep: odoo-sudo-without-context + self.env["spp.analytics.cache.entry"] # nosemgrep: odoo-sudo-without-context .sudo() .search( [("cache_key", "=", cache_key)], @@ -107,7 +107,7 @@ def store_result(self, scope, statistics, group_by, result): """ Store aggregation result in cache. 
- :param scope: spp.aggregation.scope record, ID, or dict + :param scope: spp.analytics.scope record, ID, or dict :param statistics: List of statistic names :param group_by: List of dimension names :param result: Aggregation result dictionary @@ -135,7 +135,7 @@ def store_result(self, scope, statistics, group_by, result): return False # Store or update cache entry (use sudo for internal caching operation) - cache_model = self.env["spp.aggregation.cache.entry"].sudo() # nosemgrep: odoo-sudo-without-context + cache_model = self.env["spp.analytics.cache.entry"].sudo() # nosemgrep: odoo-sudo-without-context existing = cache_model.search( [("cache_key", "=", cache_key)], limit=1, @@ -169,7 +169,7 @@ def invalidate_scope(self, scope): For more granular invalidation, consider adding a scope_id foreign key to the cache entry model in a future update. - :param scope: spp.aggregation.scope record, ID, or dict + :param scope: spp.analytics.scope record, ID, or dict :returns: Number of cache entries invalidated :rtype: int """ @@ -180,7 +180,7 @@ def invalidate_scope(self, scope): # This is a conservative approach - it may invalidate more than needed, # but ensures consistency entries = ( - self.env["spp.aggregation.cache.entry"] # nosemgrep: odoo-sudo-without-context + self.env["spp.analytics.cache.entry"] # nosemgrep: odoo-sudo-without-context .sudo() .search([("scope_type", "=", scope_type)]) ) @@ -207,7 +207,7 @@ def invalidate_all(self): :returns: Number of cache entries invalidated :rtype: int """ - entries = self.env["spp.aggregation.cache.entry"].sudo().search([]) # nosemgrep: odoo-sudo-without-context + entries = self.env["spp.analytics.cache.entry"].sudo().search([]) # nosemgrep: odoo-sudo-without-context count = len(entries) if count > 0: @@ -237,7 +237,7 @@ def cleanup_expired(self): expires_before = now - timedelta(seconds=ttl) entries = ( - self.env["spp.aggregation.cache.entry"] # nosemgrep: odoo-sudo-without-context + self.env["spp.analytics.cache.entry"] # 
nosemgrep: odoo-sudo-without-context .sudo() .search( [ @@ -364,12 +364,12 @@ def _resolve_scope(self, scope): return scope if isinstance(scope, int): - return self.env["spp.aggregation.scope"].browse(scope) + return self.env["spp.analytics.scope"].browse(scope) return scope -class AggregationCacheEntry(models.Model): +class AnalyticsCacheEntry(models.Model): """ Cache entry for aggregation results. @@ -377,7 +377,7 @@ class AggregationCacheEntry(models.Model): a single aggregation query result. """ - _name = "spp.aggregation.cache.entry" + _name = "spp.analytics.cache.entry" _description = "Aggregation Cache Entry" _order = "computed_at desc" @@ -432,4 +432,4 @@ def cron_cleanup_expired(self): :returns: Number of cache entries removed :rtype: int """ - return self.env["spp.aggregation.cache"].cleanup_expired() + return self.env["spp.analytics.cache"].cleanup_expired() diff --git a/spp_aggregation/models/service_scope_resolver.py b/spp_analytics/models/service_scope_resolver.py similarity index 97% rename from spp_aggregation/models/service_scope_resolver.py rename to spp_analytics/models/service_scope_resolver.py index efdca19a..07476d21 100644 --- a/spp_aggregation/models/service_scope_resolver.py +++ b/spp_analytics/models/service_scope_resolver.py @@ -14,7 +14,7 @@ class ScopeResolverService(models.AbstractModel): Each scope type has a dedicated resolver method. """ - _name = "spp.aggregation.scope.resolver" + _name = "spp.analytics.scope.resolver" _description = "Aggregation Scope Resolver" @api.model @@ -22,7 +22,7 @@ def resolve(self, scope): """ Resolve a scope to a list of partner IDs. - :param scope: spp.aggregation.scope record or dict for inline scope + :param scope: spp.analytics.scope record or dict for inline scope :returns: List of partner IDs :rtype: list[int] """ @@ -245,7 +245,7 @@ def _resolve_spatial_polygon_geometry(self, geojson_str): install the spp_aggregation_spatial bridge module. 
""" # Check if PostGIS bridge is available - spatial_resolver = self.env.get("spp.aggregation.spatial.resolver") + spatial_resolver = self.env.get("spp.analytics.spatial.resolver") if spatial_resolver: return spatial_resolver.resolve_polygon(geojson_str) @@ -280,7 +280,7 @@ def _resolve_spatial_buffer_params(self, latitude, longitude, radius_km): return [] # Check if PostGIS bridge is available - spatial_resolver = self.env.get("spp.aggregation.spatial.resolver") + spatial_resolver = self.env.get("spp.analytics.spatial.resolver") if spatial_resolver: return spatial_resolver.resolve_buffer(latitude, longitude, radius_km) diff --git a/spp_aggregation/pyproject.toml b/spp_analytics/pyproject.toml similarity index 100% rename from spp_aggregation/pyproject.toml rename to spp_analytics/pyproject.toml diff --git a/spp_aggregation/readme/DESCRIPTION.md b/spp_analytics/readme/DESCRIPTION.md similarity index 100% rename from spp_aggregation/readme/DESCRIPTION.md rename to spp_analytics/readme/DESCRIPTION.md diff --git a/spp_aggregation/security/aggregation_security.xml b/spp_analytics/security/aggregation_security.xml similarity index 91% rename from spp_aggregation/security/aggregation_security.xml rename to spp_analytics/security/aggregation_security.xml index 1ea3bcf6..4175a337 100644 --- a/spp_aggregation/security/aggregation_security.xml +++ b/spp_analytics/security/aggregation_security.xml @@ -2,19 +2,19 @@ - Aggregation Engine - Aggregation queries and statistical analysis + Analytics Engine + Analytics queries and statistical analysis 36 - Aggregation + Analytics Access to aggregation queries and statistical analysis + >Access to analytics queries and statistical analysis diff --git a/spp_analytics/security/ir.model.access.csv b/spp_analytics/security/ir.model.access.csv new file mode 100644 index 00000000..723acd65 --- /dev/null +++ b/spp_analytics/security/ir.model.access.csv @@ -0,0 +1,7 @@ 
+id,name,model_id:id,group_id:id,perm_read,perm_write,perm_create,perm_unlink +access_spp_analytics_scope_read,spp.analytics.scope.read,model_spp_analytics_scope,group_aggregation_read,1,0,0,0 +access_spp_analytics_scope_write,spp.analytics.scope.write,model_spp_analytics_scope,group_aggregation_write,1,1,1,1 +access_spp_analytics_cache_entry_read,spp.analytics.cache.entry.read,model_spp_analytics_cache_entry,group_aggregation_read,1,0,0,0 +access_spp_analytics_cache_entry_write,spp.analytics.cache.entry.write,model_spp_analytics_cache_entry,group_aggregation_write,1,1,1,1 +access_spp_analytics_access_rule_read,spp.analytics.access.rule.read,model_spp_analytics_access_rule,group_aggregation_read,1,0,0,0 +access_spp_analytics_access_rule_write,spp.analytics.access.rule.write,model_spp_analytics_access_rule,group_aggregation_manager,1,1,1,1 diff --git a/spp_aggregation/services/__init__.py b/spp_analytics/services/__init__.py similarity index 100% rename from spp_aggregation/services/__init__.py rename to spp_analytics/services/__init__.py diff --git a/spp_aggregation/services/scope_builder.py b/spp_analytics/services/scope_builder.py similarity index 84% rename from spp_aggregation/services/scope_builder.py rename to spp_analytics/services/scope_builder.py index ebab5bcb..19fc439e 100644 --- a/spp_aggregation/services/scope_builder.py +++ b/spp_analytics/services/scope_builder.py @@ -4,8 +4,8 @@ This module provides a unified interface for constructing scope dictionaries that are compatible with the aggregation engine's scope resolver. -The scope dictionaries can be passed directly to spp.aggregation.service.compute_aggregation() -and will be resolved by spp.aggregation.scope.resolver. +The scope dictionaries can be passed directly to spp.analytics.service.compute_aggregation() +and will be resolved by spp.analytics.scope.resolver. 
""" @@ -26,7 +26,7 @@ def build_area_scope(area_id, include_children=True): Example: >>> scope = build_area_scope(area_id=123, include_children=True) - >>> result = env['spp.aggregation.service'].compute_aggregation(scope=scope) + >>> result = env['spp.analytics.service'].compute_aggregation(scope=scope) """ return { "scope_type": "area", @@ -52,7 +52,7 @@ def build_cel_scope(cel_expression, profile="registry_individuals"): Example: >>> scope = build_cel_scope("partner.age > 18") - >>> result = env['spp.aggregation.service'].compute_aggregation(scope=scope) + >>> result = env['spp.analytics.service'].compute_aggregation(scope=scope) """ return { "scope_type": "cel", @@ -76,7 +76,7 @@ def build_explicit_scope(partner_ids): Example: >>> scope = build_explicit_scope([1, 2, 3]) - >>> result = env['spp.aggregation.service'].compute_aggregation(scope=scope) + >>> result = env['spp.analytics.service'].compute_aggregation(scope=scope) """ return { "scope_type": "explicit", diff --git a/spp_aggregation/static/description/icon.png b/spp_analytics/static/description/icon.png similarity index 100% rename from spp_aggregation/static/description/icon.png rename to spp_analytics/static/description/icon.png diff --git a/spp_analytics/static/description/index.html b/spp_analytics/static/description/index.html new file mode 100644 index 00000000..7403c470 --- /dev/null +++ b/spp_analytics/static/description/index.html @@ -0,0 +1 @@ +

spp_analytics

diff --git a/spp_aggregation/tests/README_INTEGRATION_TESTS.md b/spp_analytics/tests/README_INTEGRATION_TESTS.md similarity index 90% rename from spp_aggregation/tests/README_INTEGRATION_TESTS.md rename to spp_analytics/tests/README_INTEGRATION_TESTS.md index 5fbc20d4..df13f373 100644 --- a/spp_aggregation/tests/README_INTEGRATION_TESTS.md +++ b/spp_analytics/tests/README_INTEGRATION_TESTS.md @@ -1,4 +1,4 @@ -# Integration Tests for spp_aggregation +# Integration Tests for spp_analytics ## Overview @@ -29,12 +29,12 @@ To run the full integration tests with realistic demo data: ```bash # Test both modules together -./scripts/test_single_module.sh spp_aggregation,spp_mis_demo_v2 +./scripts/test_single_module.sh spp_analytics,spp_mis_demo_v2 ``` This will: -- Install both `spp_aggregation` and `spp_mis_demo_v2` +- Install both `spp_analytics` and `spp_mis_demo_v2` - Generate ~50 household groups with members (realistic demographics) - Run all aggregation tests including the 15+ integration test scenarios @@ -46,12 +46,12 @@ To run just the unit tests without demo data: ```bash # Test aggregation module only -./scripts/test_single_module.sh spp_aggregation +./scripts/test_single_module.sh spp_analytics ``` This will: -- Install only `spp_aggregation` with minimal dependencies +- Install only `spp_analytics` with minimal dependencies - Run all unit tests (85+ tests) - Skip integration tests that require demo data @@ -119,7 +119,7 @@ In CI pipelines, use the unit-only approach for faster feedback: # .gitlab-ci.yml or .github/workflows test-aggregation: script: - - ./scripts/test_single_module.sh spp_aggregation + - ./scripts/test_single_module.sh spp_analytics ``` For comprehensive integration testing (nightly builds): @@ -127,7 +127,7 @@ For comprehensive integration testing (nightly builds): ```yaml test-aggregation-integration: script: - - ./scripts/test_single_module.sh spp_aggregation,spp_mis_demo_v2 + - ./scripts/test_single_module.sh spp_analytics,spp_mis_demo_v2 
only: - schedules ``` @@ -187,11 +187,11 @@ Planned improvements for integration tests: ### "Module spp_mis_demo_v2 not installed" -This is expected when running `./scripts/test_single_module.sh spp_aggregation` alone. -To run integration tests, use: +This is expected when running `./scripts/test_single_module.sh spp_analytics` alone. To +run integration tests, use: ```bash -./scripts/test_single_module.sh spp_aggregation,spp_mis_demo_v2 +./scripts/test_single_module.sh spp_analytics,spp_mis_demo_v2 ``` ### "No areas found in demo data" @@ -206,12 +206,12 @@ The demo generator may have failed. Check logs for: If aggregation takes > 10s: - Check database indices on `res.partner.area_id` -- Review `spp.aggregation.cache` configuration +- Review `spp.analytics.cache` configuration - Ensure PostgreSQL has sufficient resources ## Related Documentation -- `spp_aggregation/README.md` - Module overview and architecture +- `spp_analytics/README.md` - Module overview and architecture - `spp_mis_demo_v2/README.md` - Demo data generator documentation - `docs/principles/privacy-protection.md` - K-anonymity principles - `docs/principles/performance-scalability.md` - Performance guidelines diff --git a/spp_aggregation/tests/__init__.py b/spp_analytics/tests/__init__.py similarity index 78% rename from spp_aggregation/tests/__init__.py rename to spp_analytics/tests/__init__.py index d74726e2..6e41c0f0 100644 --- a/spp_aggregation/tests/__init__.py +++ b/spp_analytics/tests/__init__.py @@ -2,8 +2,8 @@ from . import common from . import test_access_rule_area_restrictions -from . import test_aggregation_scope -from . import test_aggregation_service +from . import test_analytics_scope +from . import test_analytics_service from . import test_cache_service from . import test_distribution_service from . import test_fairness_service @@ -11,5 +11,5 @@ from . import test_privacy_enforcement from . import test_scope_builder from . import test_scope_resolver -from . 
import test_statistic_registry +from . import test_indicator_registry from . import test_coverage diff --git a/spp_aggregation/tests/common.py b/spp_analytics/tests/common.py similarity index 94% rename from spp_aggregation/tests/common.py rename to spp_analytics/tests/common.py index b78c25d1..a87cda73 100644 --- a/spp_aggregation/tests/common.py +++ b/spp_analytics/tests/common.py @@ -2,7 +2,7 @@ from odoo.tests.common import TransactionCase -class AggregationTestCase(TransactionCase): +class AnalyticsTestCase(TransactionCase): """Base test case for aggregation module tests.""" @classmethod @@ -78,7 +78,7 @@ def create_scope(self, scope_type, **kwargs): "scope_type": scope_type, } vals.update(kwargs) - return self.env["spp.aggregation.scope"].create(vals) + return self.env["spp.analytics.scope"].create(vals) def create_access_rule(self, access_level, **kwargs): """Helper to create access rules.""" @@ -88,4 +88,4 @@ def create_access_rule(self, access_level, **kwargs): "group_id": self.env.ref("base.group_user").id, } vals.update(kwargs) - return self.env["spp.aggregation.access.rule"].create(vals) + return self.env["spp.analytics.access.rule"].create(vals) diff --git a/spp_aggregation/tests/run_integration_tests.sh b/spp_analytics/tests/run_integration_tests.sh similarity index 85% rename from spp_aggregation/tests/run_integration_tests.sh rename to spp_analytics/tests/run_integration_tests.sh index 93319c8c..7bad289f 100755 --- a/spp_aggregation/tests/run_integration_tests.sh +++ b/spp_analytics/tests/run_integration_tests.sh @@ -1,13 +1,13 @@ #!/bin/bash # Part of OpenSPP. See LICENSE file for full copyright and licensing details. # -# Run integration tests for spp_aggregation with MIS demo data. +# Run integration tests for spp_analytics with MIS demo data. 
# -# This script installs both spp_aggregation and spp_mis_demo_v2 to enable +# This script installs both spp_analytics and spp_mis_demo_v2 to enable # comprehensive integration testing with realistic demo data. # # Usage: -# ./spp_aggregation/tests/run_integration_tests.sh +# ./spp_analytics/tests/run_integration_tests.sh # # Options: # --unit-only Run unit tests only (skip demo data generation) @@ -35,7 +35,7 @@ while [[ $# -gt 0 ]]; do shift ;; --help) - echo "Run integration tests for spp_aggregation" + echo "Run integration tests for spp_analytics" echo "" echo "Usage: $0 [OPTIONS]" echo "" @@ -64,21 +64,21 @@ cd "$REPO_ROOT" # Run tests if [ $UNIT_ONLY -eq 1 ]; then echo "==========================================" - echo "Running UNIT tests for spp_aggregation" + echo "Running UNIT tests for spp_analytics" echo "==========================================" echo "" echo "Integration tests will be skipped (spp_mis_demo_v2 not installed)" echo "" - ./scripts/test_single_module.sh spp_aggregation + ./scripts/test_single_module.sh spp_analytics else echo "==========================================" - echo "Running INTEGRATION tests for spp_aggregation" + echo "Running INTEGRATION tests for spp_analytics" echo "==========================================" echo "" echo "This will:" - echo " - Install spp_aggregation + spp_mis_demo_v2" + echo " - Install spp_analytics + spp_mis_demo_v2" echo " - Generate ~50 household groups with members" echo " - Run 100+ tests including integration scenarios" echo " - Test k-anonymity, performance, privacy protection" @@ -93,7 +93,7 @@ else fi # Run with both modules - ./scripts/test_single_module.sh spp_aggregation,spp_mis_demo_v2 + ./scripts/test_single_module.sh spp_analytics,spp_mis_demo_v2 fi # Show summary diff --git a/spp_aggregation/tests/test_access_rule_area_restrictions.py b/spp_analytics/tests/test_access_rule_area_restrictions.py similarity index 94% rename from 
spp_aggregation/tests/test_access_rule_area_restrictions.py rename to spp_analytics/tests/test_access_rule_area_restrictions.py index 1df4c4e3..d2c26ef3 100644 --- a/spp_aggregation/tests/test_access_rule_area_restrictions.py +++ b/spp_analytics/tests/test_access_rule_area_restrictions.py @@ -94,7 +94,7 @@ def setUpClass(cls): def test_access_rule_with_area_restrictions(self): """Test that access rules can have area restrictions.""" - rule = self.env["spp.aggregation.access.rule"].create( + rule = self.env["spp.analytics.access.rule"].create( { "name": "North District Only Rule", "user_id": self.restricted_user.id, @@ -110,7 +110,7 @@ def test_access_rule_with_area_restrictions(self): def test_explicit_scope_rejected_when_outside_allowed_areas(self): """Test that explicit scopes are rejected when registrants are outside allowed areas.""" # Create rule restricting to North district only - rule = self.env["spp.aggregation.access.rule"].create( + rule = self.env["spp.analytics.access.rule"].create( { "name": "North District Only Rule", "user_id": self.restricted_user.id, @@ -134,7 +134,7 @@ def test_explicit_scope_rejected_when_outside_allowed_areas(self): def test_explicit_scope_allowed_when_within_allowed_areas(self): """Test that explicit scopes are allowed when all registrants are within allowed areas.""" # Create rule restricting to North district only - rule = self.env["spp.aggregation.access.rule"].create( + rule = self.env["spp.analytics.access.rule"].create( { "name": "North District Only Rule", "user_id": self.restricted_user.id, @@ -156,7 +156,7 @@ def test_explicit_scope_allowed_when_within_allowed_areas(self): def test_explicit_scope_rejected_when_mixed_areas(self): """Test that explicit scopes are rejected when some registrants are outside allowed areas.""" # Create rule restricting to North district only - rule = self.env["spp.aggregation.access.rule"].create( + rule = self.env["spp.analytics.access.rule"].create( { "name": "North District Only Rule", 
"user_id": self.restricted_user.id, @@ -180,7 +180,7 @@ def test_explicit_scope_rejected_when_mixed_areas(self): def test_explicit_scope_allowed_when_no_area_restrictions(self): """Test that explicit scopes are allowed when user has no area restrictions.""" # Create rule without area restrictions - rule = self.env["spp.aggregation.access.rule"].create( + rule = self.env["spp.analytics.access.rule"].create( { "name": "Unrestricted Rule", "user_id": self.unrestricted_user.id, @@ -205,7 +205,7 @@ def test_explicit_scope_allowed_when_no_area_restrictions(self): def test_explicit_scope_allowed_with_multiple_allowed_areas(self): """Test that explicit scopes work correctly with multiple allowed areas.""" # Create rule allowing North and South districts - rule = self.env["spp.aggregation.access.rule"].create( + rule = self.env["spp.analytics.access.rule"].create( { "name": "North and South Rule", "user_id": self.restricted_user.id, @@ -242,7 +242,7 @@ def test_explicit_scope_allowed_with_multiple_allowed_areas(self): def test_area_only_scope_type_rejects_explicit_scopes(self): """Test that area_only scope type restriction rejects explicit scopes even within allowed areas.""" # Create rule with area_only scope type restriction - rule = self.env["spp.aggregation.access.rule"].create( + rule = self.env["spp.analytics.access.rule"].create( { "name": "Area Only Rule", "user_id": self.restricted_user.id, @@ -267,7 +267,7 @@ def test_area_only_scope_type_rejects_explicit_scopes(self): def test_aggregation_service_enforces_area_restrictions_on_gis_explicit_scopes(self): """Test that AggregationService enforces area restrictions on GIS-generated explicit scopes.""" # Create rule restricting to North district only - self.env["spp.aggregation.access.rule"].create( + self.env["spp.analytics.access.rule"].create( { "name": "North District Only Rule", "user_id": self.restricted_user.id, @@ -279,7 +279,7 @@ def test_aggregation_service_enforces_area_restrictions_on_gis_explicit_scopes(s 
# Switch to restricted user restricted_env = self.env(user=self.restricted_user) - aggregation_service = restricted_env["spp.aggregation.service"] + aggregation_service = restricted_env["spp.analytics.service"] # Try to compute aggregation for South registrants (simulating GIS query) scope = { @@ -295,7 +295,7 @@ def test_aggregation_service_enforces_area_restrictions_on_gis_explicit_scopes(s def test_aggregation_service_allows_area_restricted_explicit_scopes(self): """Test that AggregationService allows explicit scopes within allowed areas.""" # Create rule restricting to North district only - self.env["spp.aggregation.access.rule"].create( + self.env["spp.analytics.access.rule"].create( { "name": "North District Only Rule", "user_id": self.restricted_user.id, @@ -307,7 +307,7 @@ def test_aggregation_service_allows_area_restricted_explicit_scopes(self): # Switch to restricted user restricted_env = self.env(user=self.restricted_user) - aggregation_service = restricted_env["spp.aggregation.service"] + aggregation_service = restricted_env["spp.analytics.service"] # Compute aggregation for North registrants (should succeed) scope = { diff --git a/spp_aggregation/tests/test_aggregation_scope.py b/spp_analytics/tests/test_analytics_scope.py similarity index 96% rename from spp_aggregation/tests/test_aggregation_scope.py rename to spp_analytics/tests/test_analytics_scope.py index c7ede8d0..77490162 100644 --- a/spp_aggregation/tests/test_aggregation_scope.py +++ b/spp_analytics/tests/test_analytics_scope.py @@ -1,11 +1,11 @@ # Part of OpenSPP. See LICENSE file for full copyright and licensing details. 
from odoo.exceptions import ValidationError -from .common import AggregationTestCase +from .common import AnalyticsTestCase -class TestAggregationScope(AggregationTestCase): - """Tests for spp.aggregation.scope model.""" +class TestAnalyticsScope(AnalyticsTestCase): + """Tests for spp.analytics.scope model.""" def test_create_cel_scope(self): """Test creating a CEL expression scope.""" diff --git a/spp_aggregation/tests/test_aggregation_service.py b/spp_analytics/tests/test_analytics_service.py similarity index 90% rename from spp_aggregation/tests/test_aggregation_service.py rename to spp_analytics/tests/test_analytics_service.py index f05fb1e7..0314014a 100644 --- a/spp_aggregation/tests/test_aggregation_service.py +++ b/spp_analytics/tests/test_analytics_service.py @@ -1,16 +1,16 @@ # Part of OpenSPP. See LICENSE file for full copyright and licensing details. from odoo.exceptions import AccessError, ValidationError -from .common import AggregationTestCase +from .common import AnalyticsTestCase -class TestAggregationService(AggregationTestCase): - """Tests for spp.aggregation.service main entry point.""" +class TestAnalyticsService(AnalyticsTestCase): + """Tests for spp.analytics.service main entry point.""" @classmethod def setUpClass(cls): super().setUpClass() - cls.service = cls.env["spp.aggregation.service"] + cls.service = cls.env["spp.analytics.service"] def test_compute_aggregation_basic(self): """Test basic aggregation with explicit scope.""" @@ -150,8 +150,8 @@ def test_privacy_enforced_on_result(self): def test_statistic_suppression_uses_stricter_threshold(self): """Test top-level statistic suppression uses max(user_k, stat_k).""" - if "spp.statistic" not in self.env: - self.skipTest("spp_statistic module not installed") + if "spp.indicator" not in self.env: + self.skipTest("spp_indicator module not installed") variable = self.env["spp.cel.variable"].create( { @@ -163,7 +163,7 @@ def test_statistic_suppression_uses_stricter_threshold(self): "state": 
"active", } ) - self.env["spp.statistic"].create( + self.env["spp.indicator"].create( { "name": "agg_suppression_test_stat", "label": "Aggregation Suppression Test", @@ -172,7 +172,7 @@ def test_statistic_suppression_uses_stricter_threshold(self): "is_published_api": True, } ) - self.env["spp.aggregation.access.rule"].create( + self.env["spp.analytics.access.rule"].create( { "name": "Agg Suppression Rule k10", "access_level": "aggregate", @@ -197,7 +197,7 @@ def test_statistic_suppression_uses_stricter_threshold(self): self.assertEqual(stat["value"], "<10") -class TestAggregationServiceAccessControl(AggregationTestCase): +class TestAnalyticsServiceAccessControl(AnalyticsTestCase): """Tests for access control in aggregation service.""" @classmethod @@ -216,7 +216,7 @@ def setUpClass(cls): def test_access_rule_aggregate_only(self): """Test that aggregate-only users cannot see IDs.""" # Create user-specific rule - self.env["spp.aggregation.access.rule"].create( + self.env["spp.analytics.access.rule"].create( { "name": "Test Aggregate Rule", "access_level": "aggregate", @@ -230,7 +230,7 @@ def test_access_rule_aggregate_only(self): ) # Run as test user - service = self.env["spp.aggregation.service"].with_user(self.test_user) + service = self.env["spp.analytics.service"].with_user(self.test_user) result = service.compute_aggregation(scope) self.assertEqual(result["access_level"], "aggregate") @@ -239,7 +239,7 @@ def test_access_rule_aggregate_only(self): def test_access_rule_restricts_inline_scopes(self): """Test that inline scopes can be restricted.""" # Create user-specific rule - self.env["spp.aggregation.access.rule"].create( + self.env["spp.analytics.access.rule"].create( { "name": "Test Restricted Rule", "access_level": "aggregate", @@ -250,7 +250,7 @@ def test_access_rule_restricts_inline_scopes(self): ) # Run as test user - service = self.env["spp.aggregation.service"].with_user(self.test_user) + service = 
self.env["spp.analytics.service"].with_user(self.test_user) # Inline scope should be blocked with self.assertRaises(AccessError): @@ -273,7 +273,7 @@ def test_access_rule_restricts_dimensions(self): if dimension: # Create user-specific rule with dimension restriction - self.env["spp.aggregation.access.rule"].create( + self.env["spp.analytics.access.rule"].create( { "name": "Test Dimension Rule", "access_level": "aggregate", @@ -288,7 +288,7 @@ def test_access_rule_restricts_dimensions(self): ) # Run as test user - service = self.env["spp.aggregation.service"].with_user(self.test_user) + service = self.env["spp.analytics.service"].with_user(self.test_user) # Request with 2 dimensions when max is 1 with self.assertRaises(ValidationError): @@ -298,7 +298,7 @@ def test_access_rule_restricts_dimensions(self): ) -class TestAggregationServicePublicUser(AggregationTestCase): +class TestAnalyticsServicePublicUser(AnalyticsTestCase): """Tests for aggregation service running as public user (uid:3). The GIS API runs as base.public_user which has no Odoo model permissions. 
@@ -313,7 +313,7 @@ def setUpClass(cls): def test_compute_aggregation_explicit_scope_as_public_user(self): """Test that public user can compute aggregation with explicit scope.""" - service = self.env["spp.aggregation.service"].with_user(self.public_user) + service = self.env["spp.analytics.service"].with_user(self.public_user) result = service.compute_aggregation( { "scope_type": "explicit", @@ -327,7 +327,7 @@ def test_compute_aggregation_explicit_scope_as_public_user(self): def test_statistics_computation_as_public_user(self): """Test that statistics are computed for public user.""" - service = self.env["spp.aggregation.service"].with_user(self.public_user) + service = self.env["spp.analytics.service"].with_user(self.public_user) result = service.compute_aggregation( { "scope_type": "explicit", @@ -342,7 +342,7 @@ def test_statistics_computation_as_public_user(self): def test_access_level_defaults_to_aggregate_for_public_user(self): """Test that public user gets aggregate access level by default.""" - service = self.env["spp.aggregation.service"].with_user(self.public_user) + service = self.env["spp.analytics.service"].with_user(self.public_user) result = service.compute_aggregation( { "scope_type": "explicit", @@ -354,7 +354,7 @@ def test_access_level_defaults_to_aggregate_for_public_user(self): def test_no_registrant_ids_in_result_for_public_user(self): """Test that registrant IDs are not exposed to public user.""" - service = self.env["spp.aggregation.service"].with_user(self.public_user) + service = self.env["spp.analytics.service"].with_user(self.public_user) result = service.compute_aggregation( { "scope_type": "explicit", diff --git a/spp_aggregation/tests/test_cache_service.py b/spp_analytics/tests/test_cache_service.py similarity index 95% rename from spp_aggregation/tests/test_cache_service.py rename to spp_analytics/tests/test_cache_service.py index 475db6ef..3cf4c24b 100644 --- a/spp_aggregation/tests/test_cache_service.py +++ 
b/spp_analytics/tests/test_cache_service.py @@ -5,19 +5,19 @@ from odoo import fields -from .common import AggregationTestCase +from .common import AnalyticsTestCase -class TestCacheService(AggregationTestCase): +class TestCacheService(AnalyticsTestCase): """Test cache service for aggregation results.""" @classmethod def setUpClass(cls): super().setUpClass() - cls.cache_service = cls.env["spp.aggregation.cache"] + cls.cache_service = cls.env["spp.analytics.cache"] # Create a test scope - cls.test_scope = cls.env["spp.aggregation.scope"].create( + cls.test_scope = cls.env["spp.analytics.scope"].create( { "name": "Test Cache Scope", "scope_type": "area", @@ -101,7 +101,7 @@ def test_store_and_retrieve_cache(self): def test_cache_disabled_for_spatial_queries(self): """Test that spatial queries are not cached.""" # Create spatial polygon scope - spatial_scope = self.env["spp.aggregation.scope"].create( + spatial_scope = self.env["spp.analytics.scope"].create( { "name": "Test Spatial Scope", "scope_type": "spatial_polygon", @@ -151,7 +151,7 @@ def test_cache_expiration(self): # Mock time to be 2 hours later (past TTL for area scope) future_time = fields.Datetime.now() + timedelta(hours=2) - with patch("odoo.addons.spp_aggregation.models.service_cache.fields.Datetime.now") as mock_now: + with patch("odoo.addons.spp_analytics.models.service_cache.fields.Datetime.now") as mock_now: mock_now.return_value = future_time # Try to retrieve - should be expired @@ -195,7 +195,7 @@ def test_invalidate_all(self): """Test invalidating all cache entries.""" # Create multiple scopes and cache results scope1 = self.test_scope - scope2 = self.env["spp.aggregation.scope"].create( + scope2 = self.env["spp.analytics.scope"].create( { "name": "Test Scope 2", "scope_type": "area", @@ -243,7 +243,7 @@ def test_cleanup_expired(self): self.cache_service.store_result(self.test_scope, statistics, group_by, result) # Manually update computed_at to be 2 hours ago - entry = 
self.env["spp.aggregation.cache.entry"].search([], limit=1) + entry = self.env["spp.analytics.cache.entry"].search([], limit=1) old_time = fields.Datetime.now() - timedelta(hours=2) entry.write({"computed_at": old_time}) diff --git a/spp_aggregation/tests/test_coverage.py b/spp_analytics/tests/test_coverage.py similarity index 54% rename from spp_aggregation/tests/test_coverage.py rename to spp_analytics/tests/test_coverage.py index ee1627ad..c1849e01 100644 --- a/spp_aggregation/tests/test_coverage.py +++ b/spp_analytics/tests/test_coverage.py @@ -1,5 +1,5 @@ # Part of OpenSPP. See LICENSE file for full copyright and licensing details. -"""Extended coverage tests for spp_aggregation module. +"""Extended coverage tests for spp_analytics module. Covers edge cases in access rules, cache key generation, scope resolver, and aggregation service convenience methods. @@ -9,18 +9,19 @@ from odoo.exceptions import ValidationError from odoo.tests import tagged +from odoo.tests.common import TransactionCase -from .common import AggregationTestCase +from .common import AnalyticsTestCase @tagged("post_install", "-at_install") -class TestAccessRuleValidation(AggregationTestCase): - """Tests for spp.aggregation.access.rule constraint and validation edge cases.""" +class TestAccessRuleValidation(AnalyticsTestCase): + """Tests for spp.analytics.access.rule constraint and validation edge cases.""" def test_constraint_both_user_and_group_raises(self): """Setting both user_id and group_id must raise ValidationError.""" with self.assertRaises(ValidationError): - self.env["spp.aggregation.access.rule"].create( + self.env["spp.analytics.access.rule"].create( { "name": "Invalid Rule", "access_level": "aggregate", @@ -38,7 +39,7 @@ def test_constraint_neither_user_nor_group_raises(self): def test_constraint_k_anonymity_below_1_raises(self): """minimum_k_anonymity < 1 must raise ValidationError.""" with self.assertRaises(ValidationError): - self.env["spp.aggregation.access.rule"].create( + 
self.env["spp.analytics.access.rule"].create( { "name": "K Too Low", "access_level": "aggregate", @@ -50,7 +51,7 @@ def test_constraint_k_anonymity_below_1_raises(self): def test_constraint_k_anonymity_above_100_raises(self): """minimum_k_anonymity > 100 must raise ValidationError.""" with self.assertRaises(ValidationError): - self.env["spp.aggregation.access.rule"].create( + self.env["spp.analytics.access.rule"].create( { "name": "K Too High", "access_level": "aggregate", @@ -62,7 +63,7 @@ def test_constraint_k_anonymity_above_100_raises(self): def test_constraint_max_dimensions_negative_raises(self): """max_group_by_dimensions < 0 must raise ValidationError.""" with self.assertRaises(ValidationError): - self.env["spp.aggregation.access.rule"].create( + self.env["spp.analytics.access.rule"].create( { "name": "Negative Dims", "access_level": "aggregate", @@ -74,7 +75,7 @@ def test_constraint_max_dimensions_negative_raises(self): def test_constraint_max_dimensions_above_10_raises(self): """max_group_by_dimensions > 10 must raise ValidationError.""" with self.assertRaises(ValidationError): - self.env["spp.aggregation.access.rule"].create( + self.env["spp.analytics.access.rule"].create( { "name": "Too Many Dims", "access_level": "aggregate", @@ -216,7 +217,7 @@ def test_get_effective_rule_user_over_group(self): } ) # Create group-based rule - self.env["spp.aggregation.access.rule"].create( + self.env["spp.analytics.access.rule"].create( { "name": "Group Rule", "access_level": "aggregate", @@ -226,7 +227,7 @@ def test_get_effective_rule_user_over_group(self): } ) # Create user-specific rule - user_rule = self.env["spp.aggregation.access.rule"].create( + user_rule = self.env["spp.analytics.access.rule"].create( { "name": "User Rule", "access_level": "individual", @@ -236,20 +237,20 @@ def test_get_effective_rule_user_over_group(self): } ) # User-specific rule should win regardless of sequence - AccessRule = self.env["spp.aggregation.access.rule"] + AccessRule = 
self.env["spp.analytics.access.rule"] effective = AccessRule.get_effective_rule_for_user(test_user) self.assertEqual(effective.id, user_rule.id) self.assertEqual(effective.access_level, "individual") @tagged("post_install", "-at_install") -class TestCacheServiceKeyGeneration(AggregationTestCase): +class TestCacheServiceKeyGeneration(AnalyticsTestCase): """Tests for cache key generation across all scope types.""" @classmethod def setUpClass(cls): super().setUpClass() - cls.cache_service = cls.env["spp.aggregation.cache"] + cls.cache_service = cls.env["spp.analytics.cache"] def test_scope_key_parts_dict_area(self): """Cache key parts for dict area scope must include area_id and children flag.""" @@ -336,7 +337,7 @@ def test_get_ttl_for_scope_type(self): def test_cron_cleanup_expired(self): """cron_cleanup_expired on cache.entry must delegate to cache service.""" - cache_entry_model = self.env["spp.aggregation.cache.entry"] + cache_entry_model = self.env["spp.analytics.cache.entry"] # Should run without error and return an integer result = cache_entry_model.cron_cleanup_expired() self.assertIsInstance(result, int) @@ -344,19 +345,19 @@ def test_cron_cleanup_expired(self): def test_store_result_serialization_error(self): """store_result must return False when result cannot be serialized.""" scope = self.create_scope("area", area_id=self.area_region.id) - with patch("odoo.addons.spp_aggregation.models.service_cache.json.dumps", side_effect=TypeError("bad")): + with patch("odoo.addons.spp_analytics.models.service_cache.json.dumps", side_effect=TypeError("bad")): stored = self.cache_service.store_result(scope, ["count"], [], {"total": 1}) self.assertFalse(stored) @tagged("post_install", "-at_install") -class TestScopeResolverEdgeCases(AggregationTestCase): +class TestScopeResolverEdgeCases(AnalyticsTestCase): """Tests for scope resolver edge cases and error handling.""" @classmethod def setUpClass(cls): super().setUpClass() - cls.resolver = 
cls.env["spp.aggregation.scope.resolver"] + cls.resolver = cls.env["spp.analytics.scope.resolver"] def test_resolve_inline_missing_scope_type(self): """Inline scope dict without scope_type must return empty list.""" @@ -403,13 +404,13 @@ def test_resolve_intersect_empty_scopes(self): @tagged("post_install", "-at_install") -class TestAggregationServiceExtended(AggregationTestCase): - """Extended tests for spp.aggregation.service convenience methods and scope resolution.""" +class TestAggregationServiceExtended(AnalyticsTestCase): + """Extended tests for spp.analytics.service convenience methods and scope resolution.""" @classmethod def setUpClass(cls): super().setUpClass() - cls.service = cls.env["spp.aggregation.service"] + cls.service = cls.env["spp.analytics.service"] def test_resolve_scope_dict(self): """_resolve_scope with dict must return the same dict.""" @@ -422,7 +423,7 @@ def test_resolve_scope_int(self): scope = self.create_scope("area", area_id=self.area_region.id) result = self.service._resolve_scope(scope.id) self.assertEqual(result.id, scope.id) - self.assertEqual(result._name, "spp.aggregation.scope") + self.assertEqual(result._name, "spp.analytics.scope") def test_resolve_scope_record(self): """_resolve_scope with record must return the same record.""" @@ -489,3 +490,332 @@ def test_check_scope_allowed_no_rule(self): # Should not raise -- no rule means default allow scope_dict = {"scope_type": "explicit", "explicit_partner_ids": self.registrants[:3].ids} service._check_scope_allowed(scope_dict) + + +@tagged("post_install", "-at_install") +class TestAnalyticsScopeActions(AnalyticsTestCase): + """Tests for action_preview_registrants, action_refresh_cache, and _check_area_tags.""" + + def test_action_preview_registrants_returns_act_window(self): + """action_preview_registrants must return an ir.actions.act_window dict.""" + scope = self.create_scope( + "explicit", + explicit_partner_ids=[(6, 0, self.registrants[:5].ids)], + ) + action = 
scope.action_preview_registrants() + self.assertEqual(action["type"], "ir.actions.act_window") + self.assertEqual(action["res_model"], "res.partner") + self.assertIn("list,form", action["view_mode"]) + + def test_action_preview_registrants_domain_contains_ids(self): + """action_preview_registrants domain must restrict to scope's registrant IDs.""" + partner_ids = self.registrants[:3].ids + scope = self.create_scope( + "explicit", + explicit_partner_ids=[(6, 0, partner_ids)], + ) + action = scope.action_preview_registrants() + domain = action["domain"] + # The domain should contain an 'id in [...]' filter + self.assertTrue(any(condition[0] == "id" for condition in domain)) + id_condition = next(c for c in domain if c[0] == "id") + self.assertEqual(set(id_condition[2]), set(partner_ids)) + + def test_action_preview_registrants_name_includes_scope_name(self): + """action_preview_registrants name must mention the scope name.""" + scope = self.create_scope( + "explicit", + name="My Preview Scope", + explicit_partner_ids=[(6, 0, self.registrants[:2].ids)], + ) + action = scope.action_preview_registrants() + self.assertIn("My Preview Scope", action["name"]) + + def test_action_preview_registrants_context_disables_create_delete(self): + """action_preview_registrants context must set create and delete to False.""" + scope = self.create_scope( + "explicit", + explicit_partner_ids=[(6, 0, self.registrants[:2].ids)], + ) + action = scope.action_preview_registrants() + self.assertFalse(action["context"].get("create")) + self.assertFalse(action["context"].get("delete")) + + def test_action_refresh_cache_returns_true(self): + """action_refresh_cache must return True.""" + scope = self.create_scope( + "explicit", + explicit_partner_ids=[(6, 0, self.registrants[:5].ids)], + ) + result = scope.action_refresh_cache() + self.assertTrue(result) + + def test_action_refresh_cache_updates_last_cache_refresh(self): + """action_refresh_cache must write last_cache_refresh timestamp.""" + 
scope = self.create_scope( + "explicit", + explicit_partner_ids=[(6, 0, self.registrants[:5].ids)], + ) + self.assertFalse(scope.last_cache_refresh) + scope.action_refresh_cache() + self.assertTrue(scope.last_cache_refresh) + + def test_check_area_tags_raises_when_no_tags(self): + """_check_area_tags must raise ValidationError when area_tag scope has no tags.""" + with self.assertRaises(ValidationError): + self.create_scope("area_tag") + + def test_check_area_tags_passes_with_tags(self): + """_check_area_tags must not raise when area_tag scope has tags.""" + scope = self.create_scope( + "area_tag", + area_tag_ids=[(6, 0, [self.tag_urban.id])], + ) + self.assertTrue(scope.id) + + def test_check_area_tags_raises_when_tags_cleared(self): + """_check_area_tags must raise if area_tags are cleared after creation.""" + scope = self.create_scope( + "area_tag", + area_tag_ids=[(6, 0, [self.tag_urban.id])], + ) + with self.assertRaises(ValidationError): + scope.write({"area_tag_ids": [(5, 0, 0)]}) + + +@tagged("post_install", "-at_install") +class TestAggregationServiceInternals(AnalyticsTestCase): + """Tests for internal methods of spp.analytics.service.""" + + @classmethod + def setUpClass(cls): + super().setUpClass() + cls.service = cls.env["spp.analytics.service"] + + def test_get_effective_rule_returns_none_when_no_rule(self): + """_get_effective_rule must return None (falsy) when no rule matches.""" + user_no_rule = self.env["res.users"].create( + { + "name": "Effective Rule Test User", + "login": "effective_rule_test_user", + "email": "effectiverule@test.com", + } + ) + service = self.service.with_user(user_no_rule) + rule = service._get_effective_rule() + self.assertFalse(rule) + + def test_get_effective_rule_returns_user_rule(self): + """_get_effective_rule must return the matching rule for the current user.""" + test_user = self.env["res.users"].create( + { + "name": "Rule Test User", + "login": "rule_test_user2", + "email": "ruletest2@test.com", + } + ) + 
created_rule = self.env["spp.analytics.access.rule"].create( + { + "name": "Effective Rule For User", + "access_level": "individual", + "user_id": test_user.id, + } + ) + service = self.service.with_user(test_user) + rule = service._get_effective_rule() + self.assertTrue(rule) + self.assertEqual(rule.id, created_rule.id) + + def test_access_level_from_rule_returns_rule_level(self): + """_access_level_from_rule must return the rule's access_level when rule present.""" + rule = self.create_access_rule("individual", user_id=self.env.user.id, group_id=False) + level = self.service._access_level_from_rule(rule) + self.assertEqual(level, "individual") + + def test_access_level_from_rule_defaults_to_aggregate_when_none(self): + """_access_level_from_rule must return 'aggregate' when rule is None/falsy.""" + level = self.service._access_level_from_rule(None) + self.assertEqual(level, "aggregate") + + def test_k_threshold_from_rule_returns_rule_threshold(self): + """_k_threshold_from_rule must return minimum_k_anonymity from rule.""" + rule = self.create_access_rule( + "aggregate", + user_id=self.env.user.id, + group_id=False, + minimum_k_anonymity=7, + ) + threshold = self.service._k_threshold_from_rule(rule) + self.assertEqual(threshold, 7) + + def test_k_threshold_from_rule_uses_privacy_default_when_none(self): + """_k_threshold_from_rule must fall back to privacy service default when no rule.""" + threshold = self.service._k_threshold_from_rule(None) + privacy_default = self.env["spp.metric.privacy"].DEFAULT_K_THRESHOLD + self.assertEqual(threshold, privacy_default) + + def test_compute_single_statistic_delegates_to_registry(self): + """_compute_single_statistic must delegate to indicator registry.""" + ids = self.registrants[:5].ids + result = self.service._compute_single_statistic("count", ids) + self.assertEqual(result, 5) + + def test_compute_single_statistic_returns_none_for_unknown(self): + """_compute_single_statistic must return None for an unknown statistic.""" 
+ result = self.service._compute_single_statistic("unknown_stat_xyz", self.registrants[:3].ids) + self.assertIsNone(result) + + def test_compute_breakdown_returns_dict(self): + """_compute_breakdown must return a dict with breakdown cells.""" + ids = self.registrants.ids + result = self.service._compute_breakdown(ids, ["registrant_type"], None, None) + self.assertIsInstance(result, dict) + self.assertGreater(len(result), 0) + + def test_compute_breakdown_with_statistics(self): + """_compute_breakdown with statistics must include stats per cell.""" + ids = self.registrants.ids + result = self.service._compute_breakdown(ids, ["registrant_type"], ["count"], None) + self.assertIsInstance(result, dict) + # Each cell should have a count key + for _cell_key, cell in result.items(): + self.assertIn("count", cell) + + +@tagged("post_install", "-at_install") +class TestScopeResolverCelMethods(AnalyticsTestCase): + """Tests for CEL resolution methods in spp.analytics.scope.resolver.""" + + @classmethod + def setUpClass(cls): + super().setUpClass() + cls.resolver = cls.env["spp.analytics.scope.resolver"] + + def test_resolve_cel_record_without_executor_returns_empty(self): + """_resolve_cel with a CEL scope record returns empty when executor unavailable.""" + scope = self.create_scope( + "cel", + cel_expression="r.is_group == false", + cel_profile="registry_individuals", + ) + # When spp.cel.executor is not installed the resolver logs an error and returns [] + if self.env.get("spp.cel.executor") is None: + result = self.resolver._resolve_cel(scope) + self.assertEqual(result, []) + else: + # Module IS installed; just check it returns a list + result = self.resolver._resolve_cel(scope) + self.assertIsInstance(result, list) + + def test_resolve_cel_record_empty_expression_returns_empty(self): + """_resolve_cel_expression with empty string must return empty list.""" + result = self.resolver._resolve_cel_expression("", "registry_individuals") + self.assertEqual(result, []) + + def 
test_resolve_cel_inline_without_executor_returns_empty(self): + """_resolve_cel_inline with an inline CEL scope returns empty when executor unavailable.""" + scope_dict = { + "scope_type": "cel", + "cel_expression": "r.is_group == false", + "cel_profile": "registry_individuals", + } + if self.env.get("spp.cel.executor") is None: + result = self.resolver._resolve_cel_inline(scope_dict) + self.assertEqual(result, []) + else: + result = self.resolver._resolve_cel_inline(scope_dict) + self.assertIsInstance(result, list) + + def test_resolve_cel_inline_missing_expression_returns_empty(self): + """_resolve_cel_inline with missing cel_expression returns empty list.""" + scope_dict = { + "scope_type": "cel", + # No cel_expression key + } + result = self.resolver._resolve_cel_inline(scope_dict) + self.assertEqual(result, []) + + def test_resolve_dispatches_to_resolve_cel(self): + """resolve() on a CEL record scope must call the CEL resolver, not raise.""" + scope = self.create_scope( + "cel", + cel_expression="r.is_group == false", + ) + # Should not raise; returns a list (possibly empty if executor missing) + result = self.resolver.resolve(scope) + self.assertIsInstance(result, list) + + +@tagged("post_install", "-at_install") +class TestIndicatorRegistryInternals(TransactionCase): + """Tests for internal dispatch methods of spp.analytics.indicator.registry.""" + + @classmethod + def setUpClass(cls): + super().setUpClass() + cls.indicator_registry = cls.env["spp.analytics.indicator.registry"] + cls.registrants = cls.env["res.partner"] + for i in range(10): + cls.registrants |= cls.env["res.partner"].create( + { + "name": f"Registry Internals Test {i}", + "is_registrant": True, + } + ) + + def test_get_builtin_returns_method_for_count(self): + """_get_builtin('count') must return a callable.""" + method = self.indicator_registry._get_builtin("count") + self.assertIsNotNone(method) + self.assertTrue(callable(method)) + + def test_get_builtin_returns_method_for_gini(self): + 
"""_get_builtin('gini') must return a callable.""" + method = self.indicator_registry._get_builtin("gini") + self.assertIsNotNone(method) + self.assertTrue(callable(method)) + + def test_get_builtin_returns_method_for_gini_coefficient_alias(self): + """_get_builtin('gini_coefficient') must return a callable (alias).""" + method = self.indicator_registry._get_builtin("gini_coefficient") + self.assertIsNotNone(method) + self.assertTrue(callable(method)) + + def test_get_builtin_returns_none_for_unknown(self): + """_get_builtin must return None for an unregistered statistic name.""" + method = self.indicator_registry._get_builtin("no_such_stat") + self.assertIsNone(method) + + def test_get_builtin_count_callable_returns_correct_count(self): + """The callable returned by _get_builtin('count') must compute correctly.""" + method = self.indicator_registry._get_builtin("count") + result = method(self.registrants[:7].ids) + self.assertEqual(result, 7) + + def test_try_statistic_model_returns_none_when_model_absent(self): + """_try_statistic_model returns None when spp.indicator is not installed.""" + if self.env.get("spp.indicator") is not None: + self.skipTest("spp_indicator is installed; testing absence is not possible") + result = self.indicator_registry._try_statistic_model("some_stat", self.registrants.ids) + self.assertIsNone(result) + + def test_try_statistic_model_returns_none_for_missing_stat(self): + """_try_statistic_model returns None when named statistic record does not exist.""" + if self.env.get("spp.indicator") is None: + self.skipTest("spp_indicator not installed") + result = self.indicator_registry._try_statistic_model("nonexistent_stat_abc", self.registrants.ids) + self.assertIsNone(result) + + def test_try_variable_model_returns_none_when_model_absent(self): + """_try_variable_model returns None when spp.cel.variable is not installed.""" + if self.env.get("spp.cel.variable") is not None: + self.skipTest("spp_cel is installed; testing absence is not 
possible") + result = self.indicator_registry._try_variable_model("some_var", self.registrants.ids) + self.assertIsNone(result) + + def test_try_variable_model_returns_none_for_missing_variable(self): + """_try_variable_model returns None when named variable record does not exist.""" + if self.env.get("spp.cel.variable") is None: + self.skipTest("spp_cel not installed") + result = self.indicator_registry._try_variable_model("nonexistent_variable_xyz", self.registrants.ids) + self.assertIsNone(result) diff --git a/spp_aggregation/tests/test_distribution_service.py b/spp_analytics/tests/test_distribution_service.py similarity index 97% rename from spp_aggregation/tests/test_distribution_service.py rename to spp_analytics/tests/test_distribution_service.py index ad7721cd..6a1710d0 100644 --- a/spp_aggregation/tests/test_distribution_service.py +++ b/spp_analytics/tests/test_distribution_service.py @@ -3,12 +3,12 @@ class TestDistributionService(TransactionCase): - """Tests for spp.metrics.distribution service.""" + """Tests for spp.metric.distribution service.""" @classmethod def setUpClass(cls): super().setUpClass() - cls.service = cls.env["spp.metrics.distribution"] + cls.service = cls.env["spp.metric.distribution"] def test_empty_amounts(self): """Test distribution with empty list.""" diff --git a/spp_aggregation/tests/test_fairness_service.py b/spp_analytics/tests/test_fairness_service.py similarity index 94% rename from spp_aggregation/tests/test_fairness_service.py rename to spp_analytics/tests/test_fairness_service.py index d310e3ee..cfcb5667 100644 --- a/spp_aggregation/tests/test_fairness_service.py +++ b/spp_analytics/tests/test_fairness_service.py @@ -1,14 +1,14 @@ # Part of OpenSPP. See LICENSE file for full copyright and licensing details. 
-from .common import AggregationTestCase +from .common import AnalyticsTestCase -class TestFairnessService(AggregationTestCase): - """Tests for spp.metrics.fairness service.""" +class TestFairnessService(AnalyticsTestCase): + """Tests for spp.metric.fairness service.""" @classmethod def setUpClass(cls): super().setUpClass() - cls.service = cls.env["spp.metrics.fairness"] + cls.service = cls.env["spp.metric.fairness"] def test_empty_registrants(self): """Test fairness with no registrants.""" diff --git a/spp_aggregation/tests/test_statistic_registry.py b/spp_analytics/tests/test_indicator_registry.py similarity index 94% rename from spp_aggregation/tests/test_statistic_registry.py rename to spp_analytics/tests/test_indicator_registry.py index e87a98ea..f0bab05c 100644 --- a/spp_aggregation/tests/test_statistic_registry.py +++ b/spp_analytics/tests/test_indicator_registry.py @@ -3,12 +3,12 @@ class TestStatisticRegistry(TransactionCase): - """Tests for spp.aggregation.statistic.registry.""" + """Tests for spp.analytics.indicator.registry.""" @classmethod def setUpClass(cls): super().setUpClass() - cls.stat_registry = cls.env["spp.aggregation.statistic.registry"] + cls.stat_registry = cls.env["spp.analytics.indicator.registry"] # Create test registrants cls.registrants = cls.env["res.partner"] @@ -42,7 +42,7 @@ def test_compute_gini_coefficient_builtin(self): def test_compute_unknown_statistic(self): """Test that unknown statistic returns None with warning.""" - with self.assertLogs("odoo.addons.spp_aggregation.models.statistic_registry", level="WARNING") as log: + with self.assertLogs("odoo.addons.spp_analytics.models.indicator_registry", level="WARNING") as log: result = self.stat_registry.compute("nonexistent_stat", self.registrants.ids) self.assertIsNone(result) self.assertTrue(any("Unknown statistic: nonexistent_stat" in msg for msg in log.output)) @@ -64,12 +64,12 @@ def test_list_available_includes_builtins(self): class 
TestStatisticRegistryIntegration(TransactionCase): - """Integration tests for statistic registry with spp.statistic and spp.cel.variable.""" + """Integration tests for statistic registry with spp.indicator and spp.cel.variable.""" @classmethod def setUpClass(cls): super().setUpClass() - cls.stat_registry = cls.env["spp.aggregation.statistic.registry"] + cls.stat_registry = cls.env["spp.analytics.indicator.registry"] # Create test registrants cls.registrants = cls.env["res.partner"] @@ -106,9 +106,9 @@ def test_compute_from_cel_variable(self): self.assertGreater(result, 0) def test_compute_from_statistic_model(self): - """Test computing statistic from spp.statistic via variable.""" - if "spp.statistic" not in self.env: - self.skipTest("spp_statistic module not installed") + """Test computing statistic from spp.indicator via variable.""" + if "spp.indicator" not in self.env: + self.skipTest("spp_indicator module not installed") if "spp.cel.variable" not in self.env: self.skipTest("spp_cel module not installed") @@ -125,7 +125,7 @@ def test_compute_from_statistic_model(self): ) # Create statistic that uses the variable - self.env["spp.statistic"].create( + self.env["spp.indicator"].create( { "name": "test_registry_stat", "label": "Test Registry Statistic", @@ -142,9 +142,9 @@ def test_compute_from_statistic_model(self): self.assertGreater(result, 0) def test_list_available_includes_statistics(self): - """Test that list_available includes spp.statistic records.""" - if "spp.statistic" not in self.env: - self.skipTest("spp_statistic module not installed") + """Test that list_available includes spp.indicator records.""" + if "spp.indicator" not in self.env: + self.skipTest("spp_indicator module not installed") if "spp.cel.variable" not in self.env: self.skipTest("spp_cel module not installed") @@ -159,7 +159,7 @@ def test_list_available_includes_statistics(self): "state": "active", } ) - self.env["spp.statistic"].create( + self.env["spp.indicator"].create( { "name": 
"test_list_stat", "label": "Test List Statistic", @@ -261,7 +261,7 @@ class TestStatisticRegistryMemberAggregate(TransactionCase): @classmethod def setUpClass(cls): super().setUpClass() - cls.stat_registry = cls.env["spp.aggregation.statistic.registry"] + cls.stat_registry = cls.env["spp.analytics.indicator.registry"] if "spp.cel.variable" not in cls.env: return @@ -391,7 +391,7 @@ class TestStatisticRegistryViaService(TransactionCase): @classmethod def setUpClass(cls): super().setUpClass() - cls.service = cls.env["spp.aggregation.service"] + cls.service = cls.env["spp.analytics.service"] cls.registrants = cls.env["res.partner"] for i in range(5): cls.registrants |= cls.env["res.partner"].create( diff --git a/spp_aggregation/tests/test_integration_demo.py b/spp_analytics/tests/test_integration_demo.py similarity index 98% rename from spp_aggregation/tests/test_integration_demo.py rename to spp_analytics/tests/test_integration_demo.py index 8c5feb17..f8dd4365 100644 --- a/spp_aggregation/tests/test_integration_demo.py +++ b/spp_analytics/tests/test_integration_demo.py @@ -63,8 +63,8 @@ def setUpClass(cls): _logger.info("MIS demo data generation complete") # Get references to key models - cls.service = cls.env["spp.aggregation.service"] - cls.scope_model = cls.env["spp.aggregation.scope"] + cls.service = cls.env["spp.analytics.service"] + cls.scope_model = cls.env["spp.analytics.scope"] cls.area_model = cls.env["spp.area"] cls.partner_model = cls.env["res.partner"] cls.dimension_model = cls.env["spp.demographic.dimension"] @@ -254,7 +254,7 @@ def test_k_anonymity_suppression_with_realistic_data(self): ) # Create access rule for aggregate-only access with k=5 - self.env["spp.aggregation.access.rule"].create( + self.env["spp.analytics.access.rule"].create( { "name": "Researcher Aggregate Access", "access_level": "aggregate", @@ -428,7 +428,7 @@ def test_privacy_differencing_attack_prevention(self): ) # Create strict access rule with k=10 - 
self.env["spp.aggregation.access.rule"].create( + self.env["spp.analytics.access.rule"].create( { "name": "Strict Aggregate Access", "access_level": "aggregate", @@ -533,7 +533,7 @@ def test_spatial_aggregation_with_gps_coordinates(self): self.assertGreater(result["total_count"], 0, "Should find registrants in spatial scope") # Verify sample registrant is included - scope_registrant_ids = self.env["spp.aggregation.scope.resolver"].resolve_scope(scope) + scope_registrant_ids = self.env["spp.analytics.scope.resolver"].resolve_scope(scope) self.assertIn( sample_registrant.id, scope_registrant_ids, @@ -637,7 +637,7 @@ def test_complementary_suppression_across_dimensions(self): } ) - self.env["spp.aggregation.access.rule"].create( + self.env["spp.analytics.access.rule"].create( { "name": "Complement Test Access", "access_level": "aggregate", diff --git a/spp_aggregation/tests/test_privacy_enforcement.py b/spp_analytics/tests/test_privacy_enforcement.py similarity index 98% rename from spp_aggregation/tests/test_privacy_enforcement.py rename to spp_analytics/tests/test_privacy_enforcement.py index 7966a6db..93bc56cb 100644 --- a/spp_aggregation/tests/test_privacy_enforcement.py +++ b/spp_analytics/tests/test_privacy_enforcement.py @@ -3,12 +3,12 @@ class TestPrivacyEnforcement(TransactionCase): - """Tests for spp.metrics.privacy service.""" + """Tests for spp.metric.privacy service.""" @classmethod def setUpClass(cls): super().setUpClass() - cls.service = cls.env["spp.metrics.privacy"] + cls.service = cls.env["spp.metric.privacy"] def test_enforce_empty_result(self): """Test enforcement on empty result.""" @@ -150,7 +150,7 @@ class TestPrivacyKAnonymityAttacks(TransactionCase): @classmethod def setUpClass(cls): super().setUpClass() - cls.service = cls.env["spp.metrics.privacy"] + cls.service = cls.env["spp.metric.privacy"] def test_differencing_attack_prevention_single_sibling(self): """ diff --git a/spp_aggregation/tests/test_scope_builder.py 
b/spp_analytics/tests/test_scope_builder.py similarity index 93% rename from spp_aggregation/tests/test_scope_builder.py rename to spp_analytics/tests/test_scope_builder.py index 92fbde1a..48ccfbf9 100644 --- a/spp_aggregation/tests/test_scope_builder.py +++ b/spp_analytics/tests/test_scope_builder.py @@ -1,16 +1,16 @@ # Part of OpenSPP. See LICENSE file for full copyright and licensing details. """Tests for shared scope builder utilities.""" -from odoo.addons.spp_aggregation.services import ( +from odoo.addons.spp_analytics.services import ( build_area_scope, build_cel_scope, build_explicit_scope, ) -from .common import AggregationTestCase +from .common import AnalyticsTestCase -class TestScopeBuilder(AggregationTestCase): +class TestScopeBuilder(AnalyticsTestCase): """Tests for scope builder utility functions.""" def test_build_explicit_scope_with_list(self): @@ -86,7 +86,7 @@ def test_explicit_scope_resolves_correctly(self): scope = build_explicit_scope(test_registrants.ids) # Resolve the scope using the resolver - resolver = self.env["spp.aggregation.scope.resolver"] + resolver = self.env["spp.analytics.scope.resolver"] resolved_ids = resolver.resolve(scope) self.assertEqual(set(resolved_ids), set(test_registrants.ids)) @@ -96,7 +96,7 @@ def test_area_scope_resolves_correctly(self): scope = build_area_scope(area_id=self.area_district.id, include_children=False) # Resolve the scope using the resolver - resolver = self.env["spp.aggregation.scope.resolver"] + resolver = self.env["spp.analytics.scope.resolver"] resolved_ids = resolver.resolve(scope) # Check all returned IDs are registrants in the district @@ -111,7 +111,7 @@ def test_scope_compatible_with_aggregation_service(self): scope = build_explicit_scope(test_registrants.ids) # Compute aggregation using the scope - aggregation_service = self.env["spp.aggregation.service"] + aggregation_service = self.env["spp.analytics.service"] result = aggregation_service.compute_aggregation( scope=scope, 
statistics=["count"], diff --git a/spp_aggregation/tests/test_scope_resolver.py b/spp_analytics/tests/test_scope_resolver.py similarity index 84% rename from spp_aggregation/tests/test_scope_resolver.py rename to spp_analytics/tests/test_scope_resolver.py index d489a8e9..b94e4488 100644 --- a/spp_aggregation/tests/test_scope_resolver.py +++ b/spp_analytics/tests/test_scope_resolver.py @@ -1,9 +1,9 @@ # Part of OpenSPP. See LICENSE file for full copyright and licensing details. -from .common import AggregationTestCase +from .common import AnalyticsTestCase -class TestScopeResolver(AggregationTestCase): - """Tests for spp.aggregation.scope.resolver service.""" +class TestScopeResolver(AnalyticsTestCase): + """Tests for spp.analytics.scope.resolver service.""" def test_resolve_explicit_scope(self): """Test resolving explicit partner IDs.""" @@ -11,13 +11,13 @@ def test_resolve_explicit_scope(self): "explicit", explicit_partner_ids=[(6, 0, self.registrants[:5].ids)], ) - resolver = self.env["spp.aggregation.scope.resolver"] + resolver = self.env["spp.analytics.scope.resolver"] ids = resolver.resolve(scope) self.assertEqual(set(ids), set(self.registrants[:5].ids)) def test_resolve_inline_explicit_scope(self): """Test resolving inline explicit scope definition.""" - resolver = self.env["spp.aggregation.scope.resolver"] + resolver = self.env["spp.analytics.scope.resolver"] ids = resolver.resolve( { "scope_type": "explicit", @@ -33,7 +33,7 @@ def test_resolve_area_scope(self): area_id=self.area_district.id, include_child_areas=False, ) - resolver = self.env["spp.aggregation.scope.resolver"] + resolver = self.env["spp.analytics.scope.resolver"] ids = resolver.resolve(scope) # Check all returned IDs are registrants in the district @@ -57,7 +57,7 @@ def test_resolve_area_scope_with_children(self): area_id=self.area_region.id, include_child_areas=True, ) - resolver = self.env["spp.aggregation.scope.resolver"] + resolver = self.env["spp.analytics.scope.resolver"] ids = 
resolver.resolve(scope) # Should include registrants from both region and district @@ -72,7 +72,7 @@ def test_resolve_area_tag_scope(self): "area_tag", area_tag_ids=[(6, 0, [self.tag_urban.id])], ) - resolver = self.env["spp.aggregation.scope.resolver"] + resolver = self.env["spp.analytics.scope.resolver"] ids = resolver.resolve(scope) # Should find registrants in urban-tagged areas @@ -91,7 +91,7 @@ def test_resolve_multiple_scopes_union(self): explicit_partner_ids=[(6, 0, self.registrants[2:5].ids)], ) - resolver = self.env["spp.aggregation.scope.resolver"] + resolver = self.env["spp.analytics.scope.resolver"] ids = resolver.resolve_multiple([scope1, scope2]) # Union: 0,1,2 + 2,3,4 = 0,1,2,3,4 @@ -110,7 +110,7 @@ def test_resolve_multiple_scopes_intersect(self): explicit_partner_ids=[(6, 0, self.registrants[3:8].ids)], ) - resolver = self.env["spp.aggregation.scope.resolver"] + resolver = self.env["spp.analytics.scope.resolver"] ids = resolver.resolve_intersect([scope1, scope2]) # Intersection: 0-4 ∩ 3-7 = 3,4 @@ -118,7 +118,7 @@ def test_resolve_multiple_scopes_intersect(self): def test_resolve_empty_scope_returns_empty(self): """Test that resolving empty scopes returns empty list.""" - resolver = self.env["spp.aggregation.scope.resolver"] + resolver = self.env["spp.analytics.scope.resolver"] ids = resolver.resolve_multiple([]) self.assertEqual(ids, []) @@ -128,14 +128,14 @@ def test_resolve_spatial_polygon_without_bridge(self): "spatial_polygon", geometry_geojson='{"type": "Polygon", "coordinates": [[[0,0],[1,0],[1,1],[0,1],[0,0]]]}', ) - resolver = self.env["spp.aggregation.scope.resolver"] + resolver = self.env["spp.analytics.scope.resolver"] ids = resolver.resolve(scope) # Without PostGIS bridge, returns empty self.assertEqual(ids, []) def test_resolve_inline_area_scope(self): """Test resolving inline area scope definition.""" - resolver = self.env["spp.aggregation.scope.resolver"] + resolver = self.env["spp.analytics.scope.resolver"] ids = resolver.resolve( 
{ "scope_type": "area", @@ -146,7 +146,7 @@ def test_resolve_inline_area_scope(self): self.assertGreater(len(ids), 0) -class TestScopeResolverPublicUser(AggregationTestCase): +class TestScopeResolverPublicUser(AnalyticsTestCase): """Tests for scope resolver running as public user (uid:3). The scope resolver must work for unprivileged callers because it uses @@ -160,7 +160,7 @@ def setUpClass(cls): def test_resolve_explicit_scope_as_public_user(self): """Test that explicit scope resolution works as public user.""" - resolver = self.env["spp.aggregation.scope.resolver"].with_user(self.public_user) + resolver = self.env["spp.analytics.scope.resolver"].with_user(self.public_user) ids = resolver.resolve( { "scope_type": "explicit", @@ -171,7 +171,7 @@ def test_resolve_explicit_scope_as_public_user(self): def test_resolve_area_scope_as_public_user(self): """Test that area scope resolution works as public user.""" - resolver = self.env["spp.aggregation.scope.resolver"].with_user(self.public_user) + resolver = self.env["spp.analytics.scope.resolver"].with_user(self.public_user) ids = resolver.resolve( { "scope_type": "area", @@ -183,7 +183,7 @@ def test_resolve_area_scope_as_public_user(self): def test_resolve_area_tag_scope_as_public_user(self): """Test that area tag scope resolution works as public user.""" - resolver = self.env["spp.aggregation.scope.resolver"].with_user(self.public_user) + resolver = self.env["spp.analytics.scope.resolver"].with_user(self.public_user) ids = resolver.resolve( { "scope_type": "area_tag", diff --git a/spp_aggregation/views/aggregation_access_views.xml b/spp_analytics/views/analytics_access_views.xml similarity index 92% rename from spp_aggregation/views/aggregation_access_views.xml rename to spp_analytics/views/analytics_access_views.xml index 9ca48d05..a6777b5d 100644 --- a/spp_aggregation/views/aggregation_access_views.xml +++ b/spp_analytics/views/analytics_access_views.xml @@ -2,8 +2,8 @@ - spp.aggregation.access.rule.form - 
spp.aggregation.access.rule + spp.analytics.access.rule.form + spp.analytics.access.rule
@@ -77,8 +77,8 @@ - spp.aggregation.access.rule.list - spp.aggregation.access.rule + spp.analytics.access.rule.list + spp.analytics.access.rule @@ -94,8 +94,8 @@ - spp.aggregation.access.rule.search - spp.aggregation.access.rule + spp.analytics.access.rule.search + spp.analytics.access.rule @@ -131,7 +131,7 @@ Access Rules - spp.aggregation.access.rule + spp.analytics.access.rule list,form

diff --git a/spp_aggregation/views/aggregation_scope_views.xml b/spp_analytics/views/analytics_scope_views.xml similarity index 95% rename from spp_aggregation/views/aggregation_scope_views.xml rename to spp_analytics/views/analytics_scope_views.xml index 307b641e..77e7e48a 100644 --- a/spp_aggregation/views/aggregation_scope_views.xml +++ b/spp_analytics/views/analytics_scope_views.xml @@ -2,8 +2,8 @@ - spp.aggregation.scope.form - spp.aggregation.scope + spp.analytics.scope.form + spp.analytics.scope @@ -160,8 +160,8 @@ - spp.aggregation.scope.list - spp.aggregation.scope + spp.analytics.scope.list + spp.analytics.scope @@ -174,8 +174,8 @@ - spp.aggregation.scope.search - spp.aggregation.scope + spp.analytics.scope.search + spp.analytics.scope @@ -215,7 +215,7 @@ Aggregation Scopes - spp.aggregation.scope + spp.analytics.scope list,form

diff --git a/spp_aggregation/views/menu.xml b/spp_analytics/views/menu.xml similarity index 91% rename from spp_aggregation/views/menu.xml rename to spp_analytics/views/menu.xml index 6ba2d80c..e92dc13c 100644 --- a/spp_aggregation/views/menu.xml +++ b/spp_analytics/views/menu.xml @@ -3,7 +3,7 @@ diff --git a/spp_api_v2_gis/README.rst b/spp_api_v2_gis/README.rst index 14c61f2e..4d45ed07 100644 --- a/spp_api_v2_gis/README.rst +++ b/spp_api_v2_gis/README.rst @@ -107,7 +107,7 @@ individual registrant records. percentages). Each feature represents an *area*, not a person. - **Spatial query statistics** (``POST /gis/query/statistics``): Accepts a GeoJSON polygon and returns configured aggregate statistics computed - by ``spp.aggregation.service``. Individual registrant IDs are computed + by ``spp.analytics.service``. Individual registrant IDs are computed internally for aggregation but are **explicitly stripped** from the response before it is sent (see ``spatial_query.py``). - **Exports** (GeoPackage/GeoJSON): Contain the same area-level @@ -123,7 +123,7 @@ individual registrant records. re-identification in small populations. - **CEL variable configuration**: Administrators control which statistics are published and their suppression thresholds via - ``spp.statistic`` records. + ``spp.indicator`` records. - **Scope separation**: ``gis:read`` and ``gis:geofence`` are separate scopes, allowing clients to be granted read-only access without write capability. 
diff --git a/spp_api_v2_gis/__manifest__.py b/spp_api_v2_gis/__manifest__.py index ffdc88ba..2aad2122 100644 --- a/spp_api_v2_gis/__manifest__.py +++ b/spp_api_v2_gis/__manifest__.py @@ -16,8 +16,8 @@ "spp_area", "spp_hazard", "spp_vocabulary", - "spp_statistic", - "spp_aggregation", + "spp_indicator", + "spp_analytics", ], "data": [ "security/ir.model.access.csv", diff --git a/spp_api_v2_gis/readme/DESCRIPTION.md b/spp_api_v2_gis/readme/DESCRIPTION.md index 5450afcd..b9bfd937 100644 --- a/spp_api_v2_gis/readme/DESCRIPTION.md +++ b/spp_api_v2_gis/readme/DESCRIPTION.md @@ -54,14 +54,14 @@ Follows thin client architecture where QGIS displays data and OpenSPP performs a **Aggregated statistics only.** No endpoint in this module returns individual registrant records. - **OGC collections/items**: Return GeoJSON features organized by administrative area, with pre-computed aggregate values (counts, percentages). Each feature represents an *area*, not a person. -- **Spatial query statistics** (`POST /gis/query/statistics`): Accepts a GeoJSON polygon and returns configured aggregate statistics computed by `spp.aggregation.service`. Individual registrant IDs are computed internally for aggregation but are **explicitly stripped** from the response before it is sent (see `spatial_query.py`). +- **Spatial query statistics** (`POST /gis/query/statistics`): Accepts a GeoJSON polygon and returns configured aggregate statistics computed by `spp.analytics.service`. Individual registrant IDs are computed internally for aggregation but are **explicitly stripped** from the response before it is sent (see `spatial_query.py`). - **Exports** (GeoPackage/GeoJSON): Contain the same area-level aggregated layer data, not registrant-level records. - **Geofences**: Store only geometry and metadata — no registrant data. **Privacy controls** - **K-anonymity suppression**: Statistics backed by CEL variables can apply k-anonymity thresholds. 
When a cell count falls below the configured minimum, the value is replaced with a suppression marker and flagged as `"suppressed": true` in the response. This prevents re-identification in small populations. -- **CEL variable configuration**: Administrators control which statistics are published and their suppression thresholds via `spp.statistic` records. +- **CEL variable configuration**: Administrators control which statistics are published and their suppression thresholds via `spp.indicator` records. - **Scope separation**: `gis:read` and `gis:geofence` are separate scopes, allowing clients to be granted read-only access without write capability. **Design rationale** diff --git a/spp_api_v2_gis/routers/statistics.py b/spp_api_v2_gis/routers/statistics.py index 48e84c5e..976f6789 100644 --- a/spp_api_v2_gis/routers/statistics.py +++ b/spp_api_v2_gis/routers/statistics.py @@ -12,9 +12,9 @@ from fastapi import APIRouter, Depends, HTTPException, status from ..schemas.statistics import ( - StatisticCategoryInfo, - StatisticInfo, - StatisticsListResponse, + IndicatorCategoryInfo, + IndicatorInfo, + IndicatorsListResponse, ) _logger = logging.getLogger(__name__) @@ -26,12 +26,12 @@ "/statistics", summary="List published GIS statistics", description="Returns all statistics published for GIS context, grouped by category.", - response_model=StatisticsListResponse, + response_model=IndicatorsListResponse, ) async def list_statistics( env: Annotated[Environment, Depends(odoo_env)], api_client: Annotated[dict, Depends(get_authenticated_client)], -) -> StatisticsListResponse: +) -> IndicatorsListResponse: """List all GIS-published statistics grouped by category. 
Used by the QGIS plugin to discover what statistics are available @@ -46,7 +46,7 @@ async def list_statistics( try: # nosemgrep: odoo-sudo-without-context - Statistic = env["spp.statistic"].sudo() + Statistic = env["spp.indicator"].sudo() stats_by_category = Statistic.get_published_by_category("gis") categories = [] @@ -60,7 +60,7 @@ async def list_statistics( for stat in stat_records: config = stat.get_context_config("gis") stat_items.append( - StatisticInfo( + IndicatorInfo( name=stat.name, label=config.get("label", stat.label), description=stat.description, @@ -70,7 +70,7 @@ async def list_statistics( ) categories.append( - StatisticCategoryInfo( + IndicatorCategoryInfo( code=category_code, name=category_record.name if category_record else category_code.replace("_", " ").title(), icon=getattr(category_record, "icon", None) if category_record else None, @@ -79,7 +79,7 @@ async def list_statistics( ) total_count += len(stat_items) - return StatisticsListResponse( + return IndicatorsListResponse( categories=categories, total_count=total_count, ) diff --git a/spp_api_v2_gis/schemas/statistics.py b/spp_api_v2_gis/schemas/statistics.py index 5e846da2..6184a998 100644 --- a/spp_api_v2_gis/schemas/statistics.py +++ b/spp_api_v2_gis/schemas/statistics.py @@ -4,7 +4,7 @@ from pydantic import BaseModel, Field -class StatisticInfo(BaseModel): +class IndicatorInfo(BaseModel): """Information about a single published statistic.""" name: str = Field(..., description="Technical name (e.g., 'children_under_5')") @@ -14,17 +14,17 @@ class StatisticInfo(BaseModel): unit: str | None = Field(default=None, description="Unit of measurement") -class StatisticCategoryInfo(BaseModel): +class IndicatorCategoryInfo(BaseModel): """Information about a category of statistics.""" code: str = Field(..., description="Category code (e.g., 'demographics')") name: str = Field(..., description="Display name (e.g., 'Demographics')") icon: str | None = Field(default=None, description="Font Awesome icon 
class") - statistics: list[StatisticInfo] = Field(..., description="Statistics in this category") + statistics: list[IndicatorInfo] = Field(..., description="Statistics in this category") -class StatisticsListResponse(BaseModel): +class IndicatorsListResponse(BaseModel): """Response listing all published statistics for a context.""" - categories: list[StatisticCategoryInfo] = Field(..., description="Statistics organized by category") + categories: list[IndicatorCategoryInfo] = Field(..., description="Statistics organized by category") total_count: int = Field(..., description="Total number of statistics across all categories") diff --git a/spp_api_v2_gis/services/spatial_query_service.py b/spp_api_v2_gis/services/spatial_query_service.py index a538f9bf..6c49aba8 100644 --- a/spp_api_v2_gis/services/spatial_query_service.py +++ b/spp_api_v2_gis/services/spatial_query_service.py @@ -4,7 +4,7 @@ import json import logging -from odoo.addons.spp_aggregation.services import build_explicit_scope +from odoo.addons.spp_analytics.services import build_explicit_scope _logger = logging.getLogger(__name__) @@ -18,8 +18,8 @@ class SpatialQueryService: - area_fallback: Match via area_id when coordinates are not available Statistics computation: - - Delegates to AggregationService (spp_aggregation) - - AggregationService provides unified computation with k-anonymity protection + - Delegates to AnalyticsService (spp_analytics) + - AnalyticsService provides unified computation with k-anonymity protection """ def __init__(self, env): @@ -330,8 +330,8 @@ def _compute_statistics(self, registrant_ids, variables): "computed_at": None, } - if "spp.aggregation.service" not in self.env: - raise RuntimeError("spp.aggregation.service is required for GIS statistics queries.") + if "spp.analytics.service" not in self.env: + raise RuntimeError("spp.analytics.service is required for GIS statistics queries.") return self._compute_via_aggregation_service(registrant_ids, variables) @@ -359,7 +359,7 
@@ def _compute_via_aggregation_service(self, registrant_ids, variables): if not statistics_to_compute: # Use GIS-published statistics # nosemgrep: odoo-sudo-without-context - Statistic = self.env["spp.statistic"].sudo() + Statistic = self.env["spp.indicator"].sudo() gis_stats = Statistic.get_published_for_context("gis") statistics_to_compute = [stat.name for stat in gis_stats] if gis_stats else None @@ -372,7 +372,7 @@ def _compute_via_aggregation_service(self, registrant_ids, variables): } # Call AggregationService (no sudo - let service determine access level from calling user) - aggregation_service = self.env["spp.aggregation.service"] + aggregation_service = self.env["spp.analytics.service"] result = aggregation_service.compute_aggregation( scope=scope, statistics=statistics_to_compute, @@ -401,7 +401,7 @@ def _convert_aggregation_result(self, agg_result, registrant_ids=None): grouped_stats = {} # nosemgrep: odoo-sudo-without-context - Statistic = self.env["spp.statistic"].sudo() + Statistic = self.env["spp.indicator"].sudo() statistic_by_name = {stat.name: stat for stat in Statistic.search([("name", "in", list(statistics.keys()))])} for stat_name, stat_data in statistics.items(): diff --git a/spp_api_v2_gis/static/description/index.html b/spp_api_v2_gis/static/description/index.html index 5da61de2..0f27791e 100644 --- a/spp_api_v2_gis/static/description/index.html +++ b/spp_api_v2_gis/static/description/index.html @@ -512,7 +512,7 @@

Scopes and Data Privacy

percentages). Each feature represents an area, not a person.
  • Spatial query statistics (POST /gis/query/statistics): Accepts a GeoJSON polygon and returns configured aggregate statistics computed -by spp.aggregation.service. Individual registrant IDs are computed +by spp.analytics.service. Individual registrant IDs are computed internally for aggregation but are explicitly stripped from the response before it is sent (see spatial_query.py).
  • Exports (GeoPackage/GeoJSON): Contain the same area-level @@ -528,7 +528,7 @@

    Scopes and Data Privacy

    re-identification in small populations.
  • CEL variable configuration: Administrators control which statistics are published and their suppression thresholds via -spp.statistic records.
  • +spp.indicator records.
  • Scope separation: gis:read and gis:geofence are separate scopes, allowing clients to be granted read-only access without write capability.
  • diff --git a/spp_api_v2_gis/tests/test_spatial_query_service.py b/spp_api_v2_gis/tests/test_spatial_query_service.py index 3a895db6..f441041d 100644 --- a/spp_api_v2_gis/tests/test_spatial_query_service.py +++ b/spp_api_v2_gis/tests/test_spatial_query_service.py @@ -146,8 +146,8 @@ def test_empty_statistics(self): self.assertIsInstance(result["statistics"], dict) def test_statistic_model_exists(self): - """Test that spp.statistic model exists and has required fields.""" - Statistic = self.env["spp.statistic"] + """Test that spp.indicator model exists and has required fields.""" + Statistic = self.env["spp.indicator"] # Check that required fields exist self.assertIn("name", Statistic._fields) @@ -159,7 +159,7 @@ def test_statistic_model_exists(self): def test_create_gis_published_statistic(self): """Test creating a statistic published to GIS context.""" - Statistic = self.env["spp.statistic"] + Statistic = self.env["spp.indicator"] CelVariable = self.env["spp.cel.variable"] # Create a CEL variable @@ -195,7 +195,7 @@ def test_create_gis_published_statistic(self): def test_get_published_for_gis_context(self): """Test querying statistics published to GIS context.""" - Statistic = self.env["spp.statistic"] + Statistic = self.env["spp.indicator"] CelVariable = self.env["spp.cel.variable"] # Create a CEL variable @@ -242,7 +242,7 @@ def test_statistics_with_published_statistics(self): from ..services.spatial_query_service import SpatialQueryService CelVariable = self.env["spp.cel.variable"] - Statistic = self.env["spp.statistic"] + Statistic = self.env["spp.indicator"] # Create a CEL variable for counting groups var = CelVariable.create( @@ -299,10 +299,10 @@ def test_suppression_precedence_uses_stricter_threshold(self): from ..services.spatial_query_service import SpatialQueryService CelVariable = self.env["spp.cel.variable"] - Statistic = self.env["spp.statistic"] + Statistic = self.env["spp.indicator"] # User rule sets k=10 - 
self.env["spp.aggregation.access.rule"].create( + self.env["spp.analytics.access.rule"].create( { "name": "GIS Test Rule k10", "access_level": "aggregate", diff --git a/spp_api_v2_gis/tests/test_statistics_endpoint.py b/spp_api_v2_gis/tests/test_statistics_endpoint.py index f3c4b819..3a4e7ca8 100644 --- a/spp_api_v2_gis/tests/test_statistics_endpoint.py +++ b/spp_api_v2_gis/tests/test_statistics_endpoint.py @@ -49,7 +49,7 @@ def setUpClass(cls): ) # Create GIS-published statistics - cls.gis_stat_1 = cls.env["spp.statistic"].create( + cls.gis_stat_1 = cls.env["spp.indicator"].create( { "name": "total_households_disc", "label": "Total Households", @@ -62,7 +62,7 @@ def setUpClass(cls): } ) - cls.gis_stat_2 = cls.env["spp.statistic"].create( + cls.gis_stat_2 = cls.env["spp.indicator"].create( { "name": "disabled_members_disc", "label": "Disabled Members", @@ -76,7 +76,7 @@ def setUpClass(cls): ) # Non-GIS statistic (should not appear) - cls.non_gis_stat = cls.env["spp.statistic"].create( + cls.non_gis_stat = cls.env["spp.indicator"].create( { "name": "dashboard_only_disc", "label": "Dashboard Only", @@ -89,7 +89,7 @@ def setUpClass(cls): def test_get_published_by_category_returns_gis_stats(self): """Test that get_published_by_category returns GIS-published stats.""" - Statistic = self.env["spp.statistic"] + Statistic = self.env["spp.indicator"] by_category = Statistic.get_published_by_category("gis") # Should include demographics and vulnerability categories @@ -106,7 +106,7 @@ def test_get_published_by_category_returns_gis_stats(self): def test_non_gis_stats_excluded(self): """Test that non-GIS statistics are excluded.""" - Statistic = self.env["spp.statistic"] + Statistic = self.env["spp.indicator"] by_category = Statistic.get_published_by_category("gis") # Collect all stat names across categories @@ -130,19 +130,19 @@ def test_statistic_to_dict(self): def test_statistics_schema_validation(self): """Test that the Pydantic response schema works.""" from 
..schemas.statistics import ( - StatisticCategoryInfo, - StatisticInfo, - StatisticsListResponse, + IndicatorCategoryInfo, + IndicatorInfo, + IndicatorsListResponse, ) - response = StatisticsListResponse( + response = IndicatorsListResponse( categories=[ - StatisticCategoryInfo( + IndicatorCategoryInfo( code="demographics", name="Demographics", icon="fa-users", statistics=[ - StatisticInfo( + IndicatorInfo( name="total_households", label="Total Households", description="Count of all households", @@ -161,11 +161,11 @@ def test_statistics_schema_validation(self): self.assertEqual(response.total_count, 1) def test_statistic_info_schema(self): - """Test StatisticInfo schema with optional fields.""" - from ..schemas.statistics import StatisticInfo + """Test IndicatorInfo schema with optional fields.""" + from ..schemas.statistics import IndicatorInfo # Minimal - info = StatisticInfo( + info = IndicatorInfo( name="test", label="Test", format="count", @@ -174,7 +174,7 @@ def test_statistic_info_schema(self): self.assertIsNone(info.unit) # Full - info_full = StatisticInfo( + info_full = IndicatorInfo( name="test", label="Test", description="A test stat", @@ -186,7 +186,7 @@ def test_statistic_info_schema(self): def test_inactive_stats_excluded(self): """Test that inactive statistics are excluded from published list.""" - Statistic = self.env["spp.statistic"] + Statistic = self.env["spp.indicator"] # Create an inactive GIS stat inactive_stat = Statistic.create( diff --git a/spp_api_v2_simulation/README.rst b/spp_api_v2_simulation/README.rst index 3200fcd2..17389ed0 100644 --- a/spp_api_v2_simulation/README.rst +++ b/spp_api_v2_simulation/README.rst @@ -42,48 +42,41 @@ Key Capabilities API Endpoints ~~~~~~~~~~~~~ -+--------+-----------------------------+-----------------------------+ -| Method | Path | Description | -+========+=============================+=============================+ -| GET | ``/simulation/scenarios`` | List scenarios | 
-+--------+-----------------------------+-----------------------------+ -| POST | ``/simulation/scenarios`` | Create scenario | -+--------+-----------------------------+-----------------------------+ -| GET | ``/ | Get scenario details | -| | simulation/scenarios/{id}`` | | -+--------+-----------------------------+-----------------------------+ -| PUT | ``/ | Update draft scenario | -| | simulation/scenarios/{id}`` | | -+--------+-----------------------------+-----------------------------+ -| DELETE | ``/ | Archive scenario | -| | simulation/scenarios/{id}`` | | -+--------+-----------------------------+-----------------------------+ -| POST | ``/simula | Mark scenario ready | -| | tion/scenarios/{id}/ready`` | | -+--------+-----------------------------+-----------------------------+ -| POST | ``/simu | Execute simulation | -| | lation/scenarios/{id}/run`` | | -+--------+-----------------------------+-----------------------------+ -| POST | ``/simulation/scenario | Convert to program | -| | s/{id}/convert-to-program`` | | -+--------+-----------------------------+-----------------------------+ -| GET | ``/simulation/runs`` | List runs | -+--------+-----------------------------+-----------------------------+ -| GET | ``/simulation/runs/{id}`` | Get run with optional | -| | | details | -+--------+-----------------------------+-----------------------------+ -| POST | ``/simulation/comparisons`` | Create run comparison | -+--------+-----------------------------+-----------------------------+ -| GET | ``/si | Get comparison | -| | mulation/comparisons/{id}`` | | -+--------+-----------------------------+-----------------------------+ -| GET | ``/simulation/templates`` | List scenario templates | -+--------+-----------------------------+-----------------------------+ -| POST | ``/aggregation/compute`` | Compute population | -| | | aggregation | -+--------+-----------------------------+-----------------------------+ -| GET | ``/aggregation/dimensions`` | List demographic 
dimensions | -+--------+-----------------------------+-----------------------------+ ++--------+---------------------------------------------------+------------------------------+ +| Method | Path | Description | ++========+===================================================+==============================+ +| GET | ``/simulation/scenarios`` | List scenarios | ++--------+---------------------------------------------------+------------------------------+ +| POST | ``/simulation/scenarios`` | Create scenario | ++--------+---------------------------------------------------+------------------------------+ +| GET | ``/simulation/scenarios/{id}`` | Get scenario details | ++--------+---------------------------------------------------+------------------------------+ +| PUT | ``/simulation/scenarios/{id}`` | Update draft scenario | ++--------+---------------------------------------------------+------------------------------+ +| DELETE | ``/simulation/scenarios/{id}`` | Archive scenario | ++--------+---------------------------------------------------+------------------------------+ +| POST | ``/simulation/scenarios/{id}/ready`` | Mark scenario ready | ++--------+---------------------------------------------------+------------------------------+ +| POST | ``/simulation/scenarios/{id}/run`` | Execute simulation | ++--------+---------------------------------------------------+------------------------------+ +| POST | ``/simulation/scenarios/{id}/convert-to-program`` | Convert to program | ++--------+---------------------------------------------------+------------------------------+ +| GET | ``/simulation/runs`` | List runs | ++--------+---------------------------------------------------+------------------------------+ +| GET | ``/simulation/runs/{id}`` | Get run with optional | +| | | details | ++--------+---------------------------------------------------+------------------------------+ +| POST | ``/simulation/comparisons`` | Create run comparison | 
++--------+---------------------------------------------------+------------------------------+ +| GET | ``/simulation/comparisons/{id}`` | Get comparison | ++--------+---------------------------------------------------+------------------------------+ +| GET | ``/simulation/templates`` | List scenario templates | ++--------+---------------------------------------------------+------------------------------+ +| POST | ``/aggregation/compute`` | Compute population | +| | | aggregation | ++--------+---------------------------------------------------+------------------------------+ +| GET | ``/aggregation/dimensions`` | List demographic dimensions | ++--------+---------------------------------------------------+------------------------------+ OAuth Scopes ~~~~~~~~~~~~ diff --git a/spp_api_v2_simulation/__manifest__.py b/spp_api_v2_simulation/__manifest__.py index d8d7f10e..1c565838 100644 --- a/spp_api_v2_simulation/__manifest__.py +++ b/spp_api_v2_simulation/__manifest__.py @@ -12,7 +12,7 @@ "depends": [ "spp_api_v2", "spp_simulation", - "spp_aggregation", + "spp_analytics", ], "data": [ "security/ir.model.access.csv", diff --git a/spp_api_v2_simulation/routers/__init__.py b/spp_api_v2_simulation/routers/__init__.py index 0655b6ab..08514102 100644 --- a/spp_api_v2_simulation/routers/__init__.py +++ b/spp_api_v2_simulation/routers/__init__.py @@ -1,14 +1,14 @@ # Part of OpenSPP. See LICENSE file for full copyright and licensing details. """FastAPI routers for simulation and aggregation API.""" -from . import aggregation +from . import analytics from . 
import simulation from .comparison import comparison_router from .run import run_router from .scenario import scenario_router __all__ = [ - "aggregation", + "analytics", "simulation", "scenario_router", "run_router", diff --git a/spp_api_v2_simulation/routers/aggregation.py b/spp_api_v2_simulation/routers/analytics.py similarity index 90% rename from spp_api_v2_simulation/routers/aggregation.py rename to spp_api_v2_simulation/routers/analytics.py index 412007ac..c4706387 100644 --- a/spp_api_v2_simulation/routers/aggregation.py +++ b/spp_api_v2_simulation/routers/analytics.py @@ -11,12 +11,12 @@ from fastapi import APIRouter, Depends, HTTPException, Query, status -from ..schemas.aggregation import ( - AggregationResponse, +from ..schemas.analytics import ( + AnalyticsResponse, ComputeAggregationRequest, DimensionsListResponse, ) -from ..services.aggregation_api_service import AggregationApiService +from ..services.analytics_api_service import AnalyticsApiService _logger = logging.getLogger(__name__) @@ -25,7 +25,7 @@ @aggregation_router.post( "/compute", - response_model=AggregationResponse, + response_model=AnalyticsResponse, summary="Compute population aggregation", description="Compute population counts and statistics with optional demographic breakdowns.", ) @@ -40,7 +40,7 @@ async def compute_aggregation( aggregation:read scope Response: - AggregationResponse with total_count, statistics, and optional breakdown + AnalyticsResponse with total_count, statistics, and optional breakdown """ if not api_client.has_scope("aggregation", "read"): raise HTTPException( @@ -49,7 +49,7 @@ async def compute_aggregation( ) try: - service = AggregationApiService(env) + service = AnalyticsApiService(env) result = service.compute_aggregation( scope_dict=request.scope.model_dump(), statistics=request.statistics, @@ -100,7 +100,7 @@ async def list_dimensions( ) try: - service = AggregationApiService(env) + service = AnalyticsApiService(env) dimensions = 
service.list_dimensions(applies_to=applies_to) return {"dimensions": dimensions} diff --git a/spp_api_v2_simulation/schemas/__init__.py b/spp_api_v2_simulation/schemas/__init__.py index b6ac6759..edcbfac9 100644 --- a/spp_api_v2_simulation/schemas/__init__.py +++ b/spp_api_v2_simulation/schemas/__init__.py @@ -1,7 +1,7 @@ # Part of OpenSPP. See LICENSE file for full copyright and licensing details. """Pydantic schemas for simulation and aggregation API.""" -from . import aggregation +from . import analytics from . import simulation from .comparison import ( ComparisonCreateRequest, @@ -32,7 +32,7 @@ ) __all__ = [ - "aggregation", + "analytics", "simulation", # Scenario schemas "EntitlementRuleSchema", diff --git a/spp_api_v2_simulation/schemas/aggregation.py b/spp_api_v2_simulation/schemas/analytics.py similarity index 95% rename from spp_api_v2_simulation/schemas/aggregation.py rename to spp_api_v2_simulation/schemas/analytics.py index d2f5b48d..fb5aa411 100644 --- a/spp_api_v2_simulation/schemas/aggregation.py +++ b/spp_api_v2_simulation/schemas/analytics.py @@ -4,7 +4,7 @@ from pydantic import BaseModel, Field -class AggregationScopeRequest(BaseModel): +class AnalyticsScopeRequest(BaseModel): """Inline scope definition for aggregation queries.""" target_type: str = Field( @@ -24,7 +24,7 @@ class AggregationScopeRequest(BaseModel): class ComputeAggregationRequest(BaseModel): """Request body for POST /aggregation/compute.""" - scope: AggregationScopeRequest = Field( + scope: AnalyticsScopeRequest = Field( ..., description="Scope definition for the aggregation query", ) @@ -38,7 +38,7 @@ class ComputeAggregationRequest(BaseModel): ) -class AggregationResponse(BaseModel): +class AnalyticsResponse(BaseModel): """Response from POST /aggregation/compute.""" total_count: int = Field(..., description="Total registrants matching the scope") diff --git a/spp_api_v2_simulation/services/__init__.py b/spp_api_v2_simulation/services/__init__.py index 93b1bfa1..583bd40a 100644 
--- a/spp_api_v2_simulation/services/__init__.py +++ b/spp_api_v2_simulation/services/__init__.py @@ -1,8 +1,8 @@ # Part of OpenSPP. See LICENSE file for full copyright and licensing details. -from . import aggregation_api_service +from . import analytics_api_service from . import simulation_api_service __all__ = [ - "aggregation_api_service", + "analytics_api_service", "simulation_api_service", ] diff --git a/spp_api_v2_simulation/services/aggregation_api_service.py b/spp_api_v2_simulation/services/analytics_api_service.py similarity index 95% rename from spp_api_v2_simulation/services/aggregation_api_service.py rename to spp_api_v2_simulation/services/analytics_api_service.py index dd6fbe71..f8cbe7ec 100644 --- a/spp_api_v2_simulation/services/aggregation_api_service.py +++ b/spp_api_v2_simulation/services/analytics_api_service.py @@ -4,13 +4,13 @@ import json import logging -from odoo.addons.spp_aggregation.services import build_explicit_scope +from odoo.addons.spp_analytics.services import build_explicit_scope _logger = logging.getLogger(__name__) -class AggregationApiService: - """Thin adapter between API layer and spp.aggregation.service.""" +class AnalyticsApiService: + """Thin adapter between API layer and spp.analytics.service.""" def __init__(self, env): """Initialize aggregation API service. @@ -36,7 +36,7 @@ def compute_aggregation(self, scope_dict, statistics=None, group_by=None): """ engine_scope = self._build_engine_scope(scope_dict) # nosemgrep: odoo-sudo-without-context - aggregation_service = self.env["spp.aggregation.service"].sudo() + aggregation_service = self.env["spp.analytics.service"].sudo() result = aggregation_service.compute_aggregation( scope=engine_scope, statistics=statistics, diff --git a/spp_api_v2_simulation/tests/__init__.py b/spp_api_v2_simulation/tests/__init__.py index eaed0343..61fea039 100644 --- a/spp_api_v2_simulation/tests/__init__.py +++ b/spp_api_v2_simulation/tests/__init__.py @@ -2,8 +2,9 @@ from . 
import test_comparison_api, test_convert_to_program_api, test_run_api, test_scenario_api from . import test_scope_registration -from . import test_aggregation_api -from . import test_aggregation_service +from . import test_analytics_api +from . import test_analytics_service from . import test_run_helpers +from . import test_router_coverage from . import test_scenario_update from . import test_simulation_service diff --git a/spp_api_v2_simulation/tests/test_aggregation_api.py b/spp_api_v2_simulation/tests/test_analytics_api.py similarity index 77% rename from spp_api_v2_simulation/tests/test_aggregation_api.py rename to spp_api_v2_simulation/tests/test_analytics_api.py index 17d24936..1361170a 100644 --- a/spp_api_v2_simulation/tests/test_aggregation_api.py +++ b/spp_api_v2_simulation/tests/test_analytics_api.py @@ -1,5 +1,5 @@ # Part of OpenSPP. See LICENSE file for full copyright and licensing details. -"""Tests for aggregation API schemas and endpoint logic.""" +"""Tests for analytics API schemas and endpoint logic.""" from odoo.tests import tagged @@ -7,23 +7,23 @@ @tagged("-at_install", "post_install") -class TestAggregationApi(SimulationApiTestCase): - """Test aggregation endpoint schemas and logic.""" +class TestAnalyticsApi(SimulationApiTestCase): + """Test analytics endpoint schemas and logic.""" def test_aggregation_scope_request_defaults(self): - """Test AggregationScopeRequest schema defaults.""" - from ..schemas.aggregation import AggregationScopeRequest + """Test AnalyticsScopeRequest schema defaults.""" + from ..schemas.analytics import AnalyticsScopeRequest - scope = AggregationScopeRequest() + scope = AnalyticsScopeRequest() self.assertEqual(scope.target_type, "group") self.assertIsNone(scope.cel_expression) self.assertIsNone(scope.area_id) def test_aggregation_scope_request_with_values(self): - """Test AggregationScopeRequest with explicit values.""" - from ..schemas.aggregation import AggregationScopeRequest + """Test AnalyticsScopeRequest with 
explicit values.""" + from ..schemas.analytics import AnalyticsScopeRequest - scope = AggregationScopeRequest( + scope = AnalyticsScopeRequest( target_type="individual", cel_expression="r.age >= 18", area_id=42, @@ -34,13 +34,13 @@ def test_aggregation_scope_request_with_values(self): def test_compute_aggregation_request_minimal(self): """Test ComputeAggregationRequest with minimal fields.""" - from ..schemas.aggregation import ( - AggregationScopeRequest, + from ..schemas.analytics import ( + AnalyticsScopeRequest, ComputeAggregationRequest, ) request = ComputeAggregationRequest( - scope=AggregationScopeRequest(), + scope=AnalyticsScopeRequest(), ) self.assertEqual(request.scope.target_type, "group") self.assertIsNone(request.statistics) @@ -48,13 +48,13 @@ def test_compute_aggregation_request_minimal(self): def test_compute_aggregation_request_full(self): """Test ComputeAggregationRequest with all fields.""" - from ..schemas.aggregation import ( - AggregationScopeRequest, + from ..schemas.analytics import ( + AnalyticsScopeRequest, ComputeAggregationRequest, ) request = ComputeAggregationRequest( - scope=AggregationScopeRequest( + scope=AnalyticsScopeRequest( target_type="individual", cel_expression="r.age >= 60", ), @@ -66,10 +66,10 @@ def test_compute_aggregation_request_full(self): self.assertEqual(len(request.group_by), 2) def test_aggregation_response_schema(self): - """Test AggregationResponse schema.""" - from ..schemas.aggregation import AggregationResponse + """Test AnalyticsResponse schema.""" + from ..schemas.analytics import AnalyticsResponse - response = AggregationResponse( + response = AnalyticsResponse( total_count=150, statistics={"average_age": 45.2, "count": 150}, breakdown={"gender": {"male": 70, "female": 80}}, @@ -83,10 +83,10 @@ def test_aggregation_response_schema(self): self.assertFalse(response.from_cache) def test_aggregation_response_no_breakdown(self): - """Test AggregationResponse without breakdown.""" - from ..schemas.aggregation 
import AggregationResponse + """Test AnalyticsResponse without breakdown.""" + from ..schemas.analytics import AnalyticsResponse - response = AggregationResponse( + response = AnalyticsResponse( total_count=100, statistics={}, from_cache=True, @@ -98,7 +98,7 @@ def test_aggregation_response_no_breakdown(self): def test_dimension_info_schema(self): """Test DimensionInfo schema.""" - from ..schemas.aggregation import DimensionInfo + from ..schemas.analytics import DimensionInfo dim = DimensionInfo( name="gender", @@ -116,7 +116,7 @@ def test_dimension_info_schema(self): def test_dimension_info_minimal(self): """Test DimensionInfo with no optional fields.""" - from ..schemas.aggregation import DimensionInfo + from ..schemas.analytics import DimensionInfo dim = DimensionInfo( name="custom_dim", @@ -129,7 +129,7 @@ def test_dimension_info_minimal(self): def test_dimensions_list_response_schema(self): """Test DimensionsListResponse schema.""" - from ..schemas.aggregation import DimensionInfo, DimensionsListResponse + from ..schemas.analytics import DimensionInfo, DimensionsListResponse dims = [ DimensionInfo( @@ -152,9 +152,9 @@ def test_dimensions_list_response_schema(self): def test_scope_model_dump(self): """Test that scope model_dump produces the expected dict.""" - from ..schemas.aggregation import AggregationScopeRequest + from ..schemas.analytics import AnalyticsScopeRequest - scope = AggregationScopeRequest( + scope = AnalyticsScopeRequest( target_type="group", cel_expression="r.area_id != false", area_id=5, diff --git a/spp_api_v2_simulation/tests/test_aggregation_service.py b/spp_api_v2_simulation/tests/test_analytics_service.py similarity index 94% rename from spp_api_v2_simulation/tests/test_aggregation_service.py rename to spp_api_v2_simulation/tests/test_analytics_service.py index 7304fb81..d63b2e4b 100644 --- a/spp_api_v2_simulation/tests/test_aggregation_service.py +++ b/spp_api_v2_simulation/tests/test_analytics_service.py @@ -11,14 +11,14 @@ 
@tagged("post_install", "-at_install") -class TestAggregationApiService(SimulationApiTestCommon): - """Test aggregation API service functionality.""" +class TestAnalyticsApiService(SimulationApiTestCommon): + """Test analytics API service functionality.""" def _get_service(self): """Import and create the service.""" - from ..services.aggregation_api_service import AggregationApiService + from ..services.analytics_api_service import AnalyticsApiService - return AggregationApiService(self.env) + return AnalyticsApiService(self.env) def test_compute_aggregation_basic(self): """Test basic aggregation compute with inline scope.""" diff --git a/spp_api_v2_simulation/tests/test_router_coverage.py b/spp_api_v2_simulation/tests/test_router_coverage.py new file mode 100644 index 00000000..8a5cd70e --- /dev/null +++ b/spp_api_v2_simulation/tests/test_router_coverage.py @@ -0,0 +1,994 @@ +# Part of OpenSPP. See LICENSE file for full copyright and licensing details. +"""Tests for router helper functions and service serialization helpers not covered elsewhere.""" + +import logging +from datetime import datetime + +from odoo.tests import tagged + +from .common import SimulationApiTestCase, SimulationApiTestCommon + +_logger = logging.getLogger(__name__) + + +# --------------------------------------------------------------------------- +# _optional_str helper — defined identically in four routers; we import from +# each one directly to guarantee each copy is exercised. 
+# --------------------------------------------------------------------------- + + +@tagged("-at_install", "post_install") +class TestOptionalStrHelpers(SimulationApiTestCase): + """Test the _optional_str helper imported from each router module.""" + + # --- analytics router --- + + def test_analytics_optional_str_with_truthy_string(self): + """_optional_str returns value when given a non-empty string.""" + try: + from ..routers.analytics import _optional_str as fn + except ImportError: + self.skipTest("_optional_str not present in analytics router") + self.assertEqual(fn("hello"), "hello") + + def test_analytics_optional_str_with_false(self): + """_optional_str returns None for Odoo False.""" + try: + from ..routers.analytics import _optional_str as fn + except ImportError: + self.skipTest("_optional_str not present in analytics router") + self.assertIsNone(fn(False)) + + def test_analytics_optional_str_with_empty_string(self): + """_optional_str returns None for empty string (falsy).""" + try: + from ..routers.analytics import _optional_str as fn + except ImportError: + self.skipTest("_optional_str not present in analytics router") + self.assertIsNone(fn("")) + + def test_analytics_optional_str_with_none(self): + """_optional_str returns None for None.""" + try: + from ..routers.analytics import _optional_str as fn + except ImportError: + self.skipTest("_optional_str not present in analytics router") + self.assertIsNone(fn(None)) + + # --- comparison router --- + + def test_comparison_optional_str_with_truthy_string(self): + """_optional_str in comparison router returns value for non-empty string.""" + from ..routers.comparison import _optional_str + + self.assertEqual(_optional_str("staleness warning"), "staleness warning") + + def test_comparison_optional_str_with_false(self): + """_optional_str in comparison router returns None for Odoo False.""" + from ..routers.comparison import _optional_str + + self.assertIsNone(_optional_str(False)) + + def 
test_comparison_optional_str_with_empty_string(self): + """_optional_str in comparison router returns None for empty string.""" + from ..routers.comparison import _optional_str + + self.assertIsNone(_optional_str("")) + + def test_comparison_optional_str_with_none(self): + """_optional_str in comparison router returns None for None.""" + from ..routers.comparison import _optional_str + + self.assertIsNone(_optional_str(None)) + + # --- run router --- + + def test_run_optional_str_with_truthy_string(self): + """_optional_str in run router returns value for non-empty string.""" + from ..routers.run import _optional_str + + self.assertEqual(_optional_str("error message"), "error message") + + def test_run_optional_str_with_false(self): + """_optional_str in run router returns None for Odoo False.""" + from ..routers.run import _optional_str + + self.assertIsNone(_optional_str(False)) + + def test_run_optional_str_with_empty_string(self): + """_optional_str in run router returns None for empty string.""" + from ..routers.run import _optional_str + + self.assertIsNone(_optional_str("")) + + def test_run_optional_str_with_none(self): + """_optional_str in run router returns None for None.""" + from ..routers.run import _optional_str + + self.assertIsNone(_optional_str(None)) + + # --- scenario router --- + + def test_scenario_optional_str_with_truthy_string(self): + """_optional_str in scenario router returns value for non-empty string.""" + from ..routers.scenario import _optional_str + + self.assertEqual(_optional_str("description"), "description") + + def test_scenario_optional_str_with_false(self): + """_optional_str in scenario router returns None for Odoo False.""" + from ..routers.scenario import _optional_str + + self.assertIsNone(_optional_str(False)) + + def test_scenario_optional_str_with_empty_string(self): + """_optional_str in scenario router returns None for empty string.""" + from ..routers.scenario import _optional_str + + 
self.assertIsNone(_optional_str("")) + + def test_scenario_optional_str_with_none(self): + """_optional_str in scenario router returns None for None.""" + from ..routers.scenario import _optional_str + + self.assertIsNone(_optional_str(None)) + + def test_scenario_optional_str_with_zero(self): + """_optional_str returns None for numeric zero (falsy).""" + from ..routers.scenario import _optional_str + + self.assertIsNone(_optional_str(0)) + + def test_scenario_optional_str_with_truthy_number(self): + """_optional_str returns value for non-zero numeric (truthy).""" + from ..routers.scenario import _optional_str + + self.assertEqual(_optional_str(42), 42) + + +# --------------------------------------------------------------------------- +# _build_convert_options — already partially tested; add the remaining paths +# --------------------------------------------------------------------------- + + +@tagged("-at_install", "post_install") +class TestBuildConvertOptionsAdditional(SimulationApiTestCase): + """Cover remaining branches of _build_convert_options not hit by existing tests.""" + + def _build(self, **kwargs): + from ..routers.scenario import _build_convert_options + from ..schemas.scenario import ConvertToProgramRequest + + request = ConvertToProgramRequest(**kwargs) + return _build_convert_options(request) + + def test_is_one_time_distribution_included(self): + """is_one_time_distribution=True should appear in options.""" + options = self._build(is_one_time_distribution=True) + self.assertTrue(options.get("is_one_time_distribution")) + + def test_import_beneficiaries_included(self): + """import_beneficiaries=True should appear in options.""" + options = self._build(import_beneficiaries=True) + self.assertTrue(options.get("import_beneficiaries")) + + def test_byday_included(self): + """byday should appear in options when supplied.""" + options = self._build(rrule_type="monthly", byday="1") + self.assertEqual(options.get("byday"), "1") + + def 
test_weekday_included(self): + """weekday should appear in options when supplied.""" + options = self._build(rrule_type="monthly", weekday="MO") + self.assertEqual(options.get("weekday"), "MO") + + def test_all_weekday_flags(self): + """All seven weekday flags are included when True.""" + options = self._build( + rrule_type="weekly", + mon=True, + tue=True, + wed=True, + thu=True, + fri=True, + sat=True, + sun=True, + ) + for day in ("mon", "tue", "wed", "thu", "fri", "sat", "sun"): + self.assertTrue(options.get(day), f"Expected {day} to be True in options") + + def test_false_weekday_flags_excluded(self): + """Weekday flags that are False should not appear in options.""" + options = self._build(rrule_type="weekly", mon=True) + # tue through sun should be absent (they default to False) + for day in ("tue", "wed", "thu", "fri", "sat", "sun"): + self.assertNotIn(day, options, f"Unexpected key {day} in options") + + def test_cycle_duration_zero_excluded(self): + """cycle_duration=None should not add the key.""" + options = self._build() + self.assertNotIn("cycle_duration", options) + + def test_day_zero_excluded(self): + """day=None should not add the key.""" + options = self._build() + self.assertNotIn("day", options) + + +# --------------------------------------------------------------------------- +# _comparison_to_response helper (comparison router) +# --------------------------------------------------------------------------- + + +@tagged("-at_install", "post_install") +class TestComparisonToResponse(SimulationApiTestCase): + """Test _comparison_to_response helper in comparison router.""" + + def _make_comparison(self, comparison_json=None, overlap_count_json=None, staleness_warning=False): + """Create a minimal comparison record for testing.""" + runs = [self.run_completed, self.run_completed_2] + comparison = self.env["spp.simulation.comparison"].create( + { + "name": "Test Comparison", + "run_ids": [(6, 0, [r.id for r in runs])], + } + ) + vals = {} + if 
comparison_json is not None: + vals["comparison_json"] = comparison_json + if overlap_count_json is not None: + vals["overlap_count_json"] = overlap_count_json + if staleness_warning: + vals["staleness_warning"] = staleness_warning + if vals: + comparison.write(vals) + return comparison + + def test_comparison_to_response_empty_json(self): + """Response has empty runs and overlap when JSON fields are missing.""" + from ..routers.comparison import _comparison_to_response + + comparison = self._make_comparison() + response = _comparison_to_response(comparison) + + self.assertEqual(response.id, comparison.id) + self.assertEqual(response.name, "Test Comparison") + self.assertEqual(len(response.runs), 0) + self.assertEqual(len(response.overlap_data), 0) + self.assertIsNone(response.staleness_warning) + + def test_comparison_to_response_with_runs_json(self): + """Response includes run data from comparison_json.""" + from ..routers.comparison import _comparison_to_response + + comparison_json = { + "runs": [ + { + "run_id": self.run_completed.id, + "scenario_name": "Scenario A", + "beneficiary_count": 5, + "total_cost": 2500.0, + "coverage_rate": 50.0, + "equity_score": 85.0, + "gini_coefficient": 0.15, + "has_disparity": False, + "leakage_rate": 0.0, + "undercoverage_rate": 0.0, + "budget_utilization": 50.0, + "executed_at": "2024-01-01T00:00:00", + } + ] + } + comparison = self._make_comparison(comparison_json=comparison_json) + response = _comparison_to_response(comparison) + + self.assertEqual(len(response.runs), 1) + self.assertEqual(response.runs[0].scenario_name, "Scenario A") + self.assertEqual(response.runs[0].beneficiary_count, 5) + self.assertEqual(response.runs[0].total_cost, 2500.0) + + def test_comparison_to_response_with_overlap_json(self): + """Response includes overlap data from overlap_count_json.""" + from ..routers.comparison import _comparison_to_response + + overlap_count_json = { + "pair_1_2": { + "run_a_id": self.run_completed.id, + "run_a_name": 
"Run A", + "run_b_id": self.run_completed_2.id, + "run_b_name": "Run B", + "overlap_count": 3, + "union_count": 7, + "jaccard_index": 0.43, + } + } + comparison = self._make_comparison(overlap_count_json=overlap_count_json) + response = _comparison_to_response(comparison) + + self.assertEqual(len(response.overlap_data), 1) + self.assertEqual(response.overlap_data[0].overlap_count, 3) + self.assertEqual(response.overlap_data[0].union_count, 7) + self.assertAlmostEqual(response.overlap_data[0].jaccard_index, 0.43, places=2) + + def test_comparison_to_response_with_staleness_warning(self): + """Response includes staleness_warning when set.""" + from ..routers.comparison import _comparison_to_response + + comparison = self._make_comparison(staleness_warning="Scenario has changed since this run") + response = _comparison_to_response(comparison) + + self.assertIsNotNone(response.staleness_warning) + self.assertIn("changed", response.staleness_warning) + + def test_comparison_to_response_run_defaults(self): + """Run data missing keys falls back to defaults (0, False, 0.0).""" + from ..routers.comparison import _comparison_to_response + + # Minimal run dict — all optional fields missing + comparison_json = { + "runs": [ + { + "run_id": 1, + "scenario_name": "Minimal Run", + } + ] + } + comparison = self._make_comparison(comparison_json=comparison_json) + response = _comparison_to_response(comparison) + + run = response.runs[0] + self.assertEqual(run.beneficiary_count, 0) + self.assertEqual(run.total_cost, 0.0) + self.assertFalse(run.has_disparity) + self.assertIsNone(run.executed_at) + + +# --------------------------------------------------------------------------- +# AnalyticsApiService._parse_value_labels +# --------------------------------------------------------------------------- + + +@tagged("post_install", "-at_install") +class TestParseValueLabels(SimulationApiTestCommon): + """Test AnalyticsApiService._parse_value_labels static method.""" + + def _fn(self, value): 
+ from ..services.analytics_api_service import AnalyticsApiService + + return AnalyticsApiService._parse_value_labels(value) + + def test_parse_value_labels_none(self): + """Returns None for None input.""" + self.assertIsNone(self._fn(None)) + + def test_parse_value_labels_false(self): + """Returns None for Odoo False input.""" + self.assertIsNone(self._fn(False)) + + def test_parse_value_labels_empty_string(self): + """Returns None for empty string.""" + self.assertIsNone(self._fn("")) + + def test_parse_value_labels_dict_passthrough(self): + """Returns dict unchanged when already a dict.""" + data = {"M": "Male", "F": "Female"} + result = self._fn(data) + self.assertEqual(result, data) + + def test_parse_value_labels_valid_json_string(self): + """Parses valid JSON string to dict.""" + result = self._fn('{"1": "Yes", "0": "No"}') + self.assertEqual(result, {"1": "Yes", "0": "No"}) + + def test_parse_value_labels_invalid_json_string(self): + """Returns None for invalid JSON string.""" + result = self._fn("{not valid json}") + self.assertIsNone(result) + + def test_parse_value_labels_non_string_non_dict(self): + """Returns None for unexpected types like int.""" + result = self._fn(42) + self.assertIsNone(result) + + def test_parse_value_labels_list_value(self): + """Returns None for a list (not dict or str).""" + result = self._fn(["a", "b"]) + self.assertIsNone(result) + + +# --------------------------------------------------------------------------- +# AnalyticsApiService._build_engine_scope +# --------------------------------------------------------------------------- + + +@tagged("post_install", "-at_install") +class TestBuildEngineScope(SimulationApiTestCommon): + """Test AnalyticsApiService._build_engine_scope method.""" + + def _get_service(self): + from ..services.analytics_api_service import AnalyticsApiService + + return AnalyticsApiService(self.env) + + def test_build_engine_scope_group_target(self): + """Returns explicit scope dict for group target 
type.""" + service = self._get_service() + result = service._build_engine_scope({"target_type": "group"}) + + self.assertIsInstance(result, dict) + self.assertEqual(result.get("scope_type"), "explicit") + self.assertIn("explicit_partner_ids", result) + + def test_build_engine_scope_individual_target(self): + """Returns explicit scope dict for individual target type.""" + service = self._get_service() + result = service._build_engine_scope({"target_type": "individual"}) + + self.assertIsInstance(result, dict) + self.assertEqual(result.get("scope_type"), "explicit") + self.assertIn("explicit_partner_ids", result) + + def test_build_engine_scope_default_target_is_group(self): + """target_type defaults to 'group' when absent.""" + service = self._get_service() + result_default = service._build_engine_scope({}) + result_group = service._build_engine_scope({"target_type": "group"}) + + # Both should resolve to the same scope type + self.assertEqual(result_default.get("scope_type"), result_group.get("scope_type")) + + def test_build_engine_scope_with_area_id(self): + """area_id filter is applied when provided.""" + service = self._get_service() + + # Create an area with a registrant + area = self.env["spp.area"].create({"draft_name": "Coverage Test Area"}) + self.env["res.partner"].create( + { + "name": "Area Group", + "is_registrant": True, + "is_group": True, + "area_id": area.id, + } + ) + + result_with_area = service._build_engine_scope({"target_type": "group", "area_id": area.id}) + result_without_area = service._build_engine_scope({"target_type": "group"}) + + # With area filter, partner_ids should be a subset of without-area + self.assertIsInstance(result_with_area["explicit_partner_ids"], list) + self.assertIsInstance(result_without_area["explicit_partner_ids"], list) + self.assertLessEqual( + len(result_with_area["explicit_partner_ids"]), + len(result_without_area["explicit_partner_ids"]), + ) + + def 
test_build_engine_scope_with_invalid_cel_expression_falls_back(self): + """Invalid CEL expression falls back to unfiltered partner set.""" + service = self._get_service() + + # Provide a cel_expression that will fail — the service must log and continue + result = service._build_engine_scope( + { + "target_type": "group", + "cel_expression": "INVALID CEL $$$ SYNTAX", + } + ) + + # Should still return a valid scope dict + self.assertIsInstance(result, dict) + self.assertEqual(result.get("scope_type"), "explicit") + + +# --------------------------------------------------------------------------- +# SimulationApiService serialization helpers +# --------------------------------------------------------------------------- + + +@tagged("post_install", "-at_install") +class TestSimulationSerializationHelpers(SimulationApiTestCommon): + """Test private serialization helpers on SimulationApiService.""" + + def _get_service(self): + from ..services.simulation_api_service import SimulationApiService + + return SimulationApiService(self.env) + + # --- _serialize_scenario --- + + def test_serialize_scenario_basic_fields(self): + """_serialize_scenario returns all expected top-level keys.""" + service = self._get_service() + result = service._serialize_scenario(self.scenario) + + expected_keys = { + "id", + "name", + "description", + "category", + "template_id", + "target_type", + "targeting_expression", + "budget_amount", + "budget_strategy", + "ideal_population_expression", + "state", + "targeting_preview_count", + "entitlement_rules", + "run_count", + } + self.assertEqual(set(result.keys()), expected_keys) + + def test_serialize_scenario_values(self): + """_serialize_scenario returns correct values for a known scenario.""" + service = self._get_service() + result = service._serialize_scenario(self.scenario) + + self.assertEqual(result["id"], self.scenario.id) + self.assertEqual(result["name"], "Test Scenario") + self.assertEqual(result["target_type"], "group") + 
self.assertEqual(result["state"], "draft") + self.assertEqual(result["budget_amount"], 50000.0) + + def test_serialize_scenario_null_optionals(self): + """Optional string fields are None when Odoo returns False.""" + service = self._get_service() + + # Create a scenario with no description or category + bare_scenario = self.env["spp.simulation.scenario"].create( + { + "name": "Bare Scenario", + "target_type": "individual", + "targeting_expression": "", + } + ) + result = service._serialize_scenario(bare_scenario) + + self.assertIsNone(result["description"]) + self.assertIsNone(result["category"]) + self.assertIsNone(result["template_id"]) + self.assertIsNone(result["ideal_population_expression"]) + + def test_serialize_scenario_includes_rules(self): + """_serialize_scenario includes serialized entitlement rules.""" + service = self._get_service() + result = service._serialize_scenario(self.scenario) + + self.assertIsInstance(result["entitlement_rules"], list) + self.assertGreaterEqual(len(result["entitlement_rules"]), 1) + + rule = result["entitlement_rules"][0] + self.assertIn("id", rule) + self.assertIn("amount_mode", rule) + self.assertIn("amount", rule) + + def test_serialize_scenario_with_template(self): + """_serialize_scenario includes template_id when set.""" + service = self._get_service() + + scenario_with_template = self.env["spp.simulation.scenario"].create( + { + "name": "Templated Scenario", + "target_type": "group", + "targeting_expression": "true", + "template_id": self.template.id, + } + ) + result = service._serialize_scenario(scenario_with_template) + + self.assertEqual(result["template_id"], self.template.id) + + # --- _serialize_entitlement_rule --- + + def test_serialize_entitlement_rule_basic(self): + """_serialize_entitlement_rule returns all expected keys.""" + service = self._get_service() + result = service._serialize_entitlement_rule(self.entitlement_rule) + + expected_keys = { + "id", + "name", + "sequence", + "amount_mode", + "amount", 
+ "multiplier_field", + "max_multiplier", + "amount_cel_expression", + "condition_cel_expression", + } + self.assertEqual(set(result.keys()), expected_keys) + + def test_serialize_entitlement_rule_values(self): + """_serialize_entitlement_rule returns correct field values.""" + service = self._get_service() + result = service._serialize_entitlement_rule(self.entitlement_rule) + + self.assertEqual(result["id"], self.entitlement_rule.id) + self.assertEqual(result["amount_mode"], "fixed") + self.assertEqual(result["amount"], 500.0) + self.assertEqual(result["sequence"], 10) + + def test_serialize_entitlement_rule_optional_none_for_false(self): + """Optional fields are None when Odoo stores False.""" + service = self._get_service() + result = service._serialize_entitlement_rule(self.entitlement_rule) + + self.assertIsNone(result["multiplier_field"]) + self.assertIsNone(result["amount_cel_expression"]) + self.assertIsNone(result["condition_cel_expression"]) + + def test_serialize_entitlement_rule_multiplier_mode(self): + """Multiplier rule fields are serialized correctly.""" + service = self._get_service() + + multiplier_rule = self.env["spp.simulation.entitlement.rule"].create( + { + "scenario_id": self.scenario.id, + "name": "Multiplier Rule", + "amount_mode": "multiplier", + "amount": 100.0, + "multiplier_field": "household_size", + "max_multiplier": 8, + "sequence": 20, + } + ) + result = service._serialize_entitlement_rule(multiplier_rule) + + self.assertEqual(result["amount_mode"], "multiplier") + self.assertEqual(result["multiplier_field"], "household_size") + self.assertEqual(result["max_multiplier"], 8) + + def test_serialize_entitlement_rule_cel_mode(self): + """CEL rule expressions are serialized correctly.""" + service = self._get_service() + + cel_rule = self.env["spp.simulation.entitlement.rule"].create( + { + "scenario_id": self.scenario.id, + "name": "CEL Rule", + "amount_mode": "cel", + "amount": 0.0, + "amount_cel_expression": "r.income * 0.1", + 
"condition_cel_expression": "r.income < 10000", + "sequence": 30, + } + ) + result = service._serialize_entitlement_rule(cel_rule) + + self.assertEqual(result["amount_mode"], "cel") + self.assertEqual(result["amount_cel_expression"], "r.income * 0.1") + self.assertEqual(result["condition_cel_expression"], "r.income < 10000") + + # --- _serialize_run_headline --- + + def test_serialize_run_headline_keys(self): + """_serialize_run_headline returns expected top-level keys.""" + service = self._get_service() + + # Need a real run record — run a simulation first + self.scenario.write({"targeting_expression": "true"}) + run_result = service.run_simulation(self.scenario.id) + run = self.env["spp.simulation.run"].browse(run_result["id"]) + + result = service._serialize_run_headline(run) + + expected_keys = { + "id", + "scenario_id", + "scenario_name", + "state", + "beneficiary_count", + "total_registry_count", + "coverage_rate", + "total_cost", + "budget_utilization", + "gini_coefficient", + "equity_score", + "has_disparity", + "executed_at", + "execution_duration_seconds", + "error_message", + } + self.assertEqual(set(result.keys()), expected_keys) + + def test_serialize_run_headline_values(self): + """_serialize_run_headline returns correct scalar values.""" + service = self._get_service() + + self.scenario.write({"targeting_expression": "true"}) + run_result = service.run_simulation(self.scenario.id) + run = self.env["spp.simulation.run"].browse(run_result["id"]) + + result = service._serialize_run_headline(run) + + self.assertEqual(result["id"], run.id) + self.assertEqual(result["scenario_id"], self.scenario.id) + self.assertEqual(result["scenario_name"], self.scenario.name) + self.assertIn(result["state"], ("completed", "failed")) + + def test_serialize_run_headline_executed_at_iso(self): + """executed_at is serialized as ISO string when set.""" + service = self._get_service() + + run = self.env["spp.simulation.run"].create( + { + "scenario_id": self.scenario.id, + 
"state": "completed", + "beneficiary_count": 2, + "total_cost": 1000.0, + "executed_at": datetime(2025, 6, 15, 12, 0, 0), + } + ) + result = service._serialize_run_headline(run) + + self.assertIsNotNone(result["executed_at"]) + self.assertIn("2025", result["executed_at"]) + + def test_serialize_run_headline_no_executed_at(self): + """executed_at is None when the field is empty.""" + service = self._get_service() + + run = self.env["spp.simulation.run"].create( + { + "scenario_id": self.scenario.id, + "state": "failed", + "beneficiary_count": 0, + "total_cost": 0.0, + } + ) + # Clear executed_at to test the None branch + run.executed_at = False + result = service._serialize_run_headline(run) + + self.assertIsNone(result["executed_at"]) + + def test_serialize_run_headline_error_message_none_for_false(self): + """error_message is None when Odoo stores False.""" + service = self._get_service() + + run = self.env["spp.simulation.run"].create( + { + "scenario_id": self.scenario.id, + "state": "completed", + "beneficiary_count": 1, + "total_cost": 500.0, + } + ) + result = service._serialize_run_headline(run) + + self.assertIsNone(result["error_message"]) + + # --- _serialize_run_detail --- + + def test_serialize_run_detail_extends_headline(self): + """_serialize_run_detail includes all headline keys plus detail-specific keys.""" + service = self._get_service() + + run = self.env["spp.simulation.run"].create( + { + "scenario_id": self.scenario.id, + "state": "completed", + "beneficiary_count": 3, + "total_cost": 1500.0, + } + ) + result = service._serialize_run_detail(run) + + detail_only_keys = { + "leakage_rate", + "undercoverage_rate", + "distribution_json", + "fairness_json", + "targeting_efficiency_json", + "geographic_json", + "metric_results_json", + "scenario_snapshot_json", + } + for key in detail_only_keys: + self.assertIn(key, result, f"Missing detail key: {key}") + + def test_serialize_run_detail_json_fields_are_none_when_empty(self): + """JSON detail fields 
are None when not populated.""" + service = self._get_service() + + run = self.env["spp.simulation.run"].create( + { + "scenario_id": self.scenario.id, + "state": "failed", + "beneficiary_count": 0, + "total_cost": 0.0, + } + ) + result = service._serialize_run_detail(run) + + self.assertIsNone(result["distribution_json"]) + self.assertIsNone(result["fairness_json"]) + self.assertIsNone(result["targeting_efficiency_json"]) + self.assertIsNone(result["geographic_json"]) + self.assertIsNone(result["metric_results_json"]) + self.assertIsNone(result["scenario_snapshot_json"]) + + def test_serialize_run_detail_json_fields_returned_when_set(self): + """JSON detail fields are returned as-is when populated.""" + service = self._get_service() + + distribution_data = { + "count": 3, + "total": 1500.0, + "minimum": 500.0, + "maximum": 500.0, + "mean": 500.0, + "median": 500.0, + "standard_deviation": 0.0, + "gini_coefficient": 0.0, + "percentiles": {}, + } + run = self.env["spp.simulation.run"].create( + { + "scenario_id": self.scenario.id, + "state": "completed", + "beneficiary_count": 3, + "total_cost": 1500.0, + "distribution_json": distribution_data, + } + ) + result = service._serialize_run_detail(run) + + self.assertIsNotNone(result["distribution_json"]) + self.assertEqual(result["distribution_json"]["count"], 3) + + # --- _serialize_comparison --- + + def test_serialize_comparison_keys(self): + """_serialize_comparison returns expected top-level keys.""" + service = self._get_service() + + self.scenario.write({"targeting_expression": "true"}) + run_a_result = service.run_simulation(self.scenario.id) + + scenario_b = self.env["spp.simulation.scenario"].create( + { + "name": "Scenario B For Comparison", + "target_type": "group", + "targeting_expression": "true", + "state": "draft", + } + ) + self.env["spp.simulation.entitlement.rule"].create( + { + "scenario_id": scenario_b.id, + "name": "Rule B", + "amount_mode": "fixed", + "amount": 200.0, + } + ) + run_b_result = 
service.run_simulation(scenario_b.id) + + result = service.compare_runs([run_a_result["id"], run_b_result["id"]]) + + expected_keys = {"id", "name", "runs", "overlap", "staleness_warning"} + self.assertEqual(set(result.keys()), expected_keys) + + def test_serialize_comparison_runs_list(self): + """_serialize_comparison includes per-run metrics in the runs list.""" + service = self._get_service() + + self.scenario.write({"targeting_expression": "true"}) + run_a_result = service.run_simulation(self.scenario.id) + + scenario_b = self.env["spp.simulation.scenario"].create( + { + "name": "Scenario B Runs List", + "target_type": "group", + "targeting_expression": "true", + "state": "draft", + } + ) + self.env["spp.simulation.entitlement.rule"].create( + { + "scenario_id": scenario_b.id, + "name": "Rule", + "amount_mode": "fixed", + "amount": 150.0, + } + ) + run_b_result = service.run_simulation(scenario_b.id) + + result = service.compare_runs([run_a_result["id"], run_b_result["id"]]) + + self.assertIsInstance(result["runs"], list) + self.assertIsInstance(result["overlap"], list) + + def test_serialize_comparison_staleness_none_when_false(self): + """staleness_warning is None when Odoo returns False on the record.""" + service = self._get_service() + + # Need at least 2 runs for a comparison + run_result_a = service.run_simulation(self.scenario.id) + run_result_b = service.run_simulation(self.scenario.id) + + comparison = self.env["spp.simulation.comparison"].create( + { + "name": "Staleness Test", + "run_ids": [(6, 0, [run_result_a["id"], run_result_b["id"]])], + } + ) + + result = service._serialize_comparison(comparison) + self.assertIsNone(result["staleness_warning"]) + + def test_serialize_comparison_with_explicit_comparison_json(self): + """_serialize_comparison parses comparison_json runs correctly.""" + service = self._get_service() + + comparison = self.env["spp.simulation.comparison"].create( + { + "name": "Direct JSON Test", + "comparison_json": { + "runs": [ 
+ { + "run_id": 1, + "scenario_name": "Alpha", + "beneficiary_count": 10, + "total_cost": 5000.0, + "coverage_rate": 100.0, + "equity_score": 90.0, + "gini_coefficient": 0.05, + "has_disparity": False, + "leakage_rate": 0.0, + "undercoverage_rate": 0.0, + "budget_utilization": 100.0, + "executed_at": "2025-01-01T00:00:00", + } + ] + }, + "overlap_count_json": { + "pair_1_2": { + "run_a_id": 1, + "run_a_name": "Alpha", + "run_b_id": 2, + "run_b_name": "Beta", + "overlap_count": 5, + "union_count": 15, + "jaccard_index": 0.33, + } + }, + } + ) + + result = service._serialize_comparison(comparison) + + self.assertEqual(len(result["runs"]), 1) + self.assertEqual(result["runs"][0]["scenario_name"], "Alpha") + self.assertEqual(result["runs"][0]["beneficiary_count"], 10) + + self.assertEqual(len(result["overlap"]), 1) + self.assertEqual(result["overlap"][0]["overlap_count"], 5) + self.assertAlmostEqual(result["overlap"][0]["jaccard_index"], 0.33, places=2) + + def test_serialize_comparison_missing_run_fields_use_defaults(self): + """Run dicts with missing keys fall back to zero/False defaults.""" + service = self._get_service() + + comparison = self.env["spp.simulation.comparison"].create( + { + "name": "Defaults Test", + "comparison_json": { + "runs": [ + { + "run_id": 99, + "scenario_name": "Partial", + # all numeric fields absent + } + ] + }, + } + ) + + result = service._serialize_comparison(comparison) + + run = result["runs"][0] + self.assertEqual(run["beneficiary_count"], 0) + self.assertEqual(run["total_cost"], 0.0) + self.assertFalse(run["has_disparity"]) + self.assertIsNone(run["executed_at"]) diff --git a/spp_statistic/README.rst b/spp_indicator/README.rst similarity index 93% rename from spp_statistic/README.rst rename to spp_indicator/README.rst index 488bcd24..22486a61 100644 --- a/spp_statistic/README.rst +++ b/spp_indicator/README.rst @@ -1,6 +1,6 @@ -================== -OpenSPP Statistics -================== +================= +OpenSPP Indicator 
+================= .. !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! @@ -17,7 +17,7 @@ OpenSPP Statistics :target: http://www.gnu.org/licenses/lgpl-3.0-standalone.html :alt: License: LGPL-3 .. |badge3| image:: https://img.shields.io/badge/github-OpenSPP%2FOpenSPP2-lightgray.png?logo=github - :target: https://github.com/OpenSPP/OpenSPP2/tree/19.0/spp_statistic + :target: https://github.com/OpenSPP/OpenSPP2/tree/19.0/spp_indicator :alt: OpenSPP/OpenSPP2 |badge1| |badge2| |badge3| @@ -43,15 +43,15 @@ Key Capabilities Key Models ~~~~~~~~~~ -+---------------------------+-----------------------------------------+ -| Model | Description | -+===========================+=========================================+ -| ``spp.statistic`` | A publishable statistic linked to a CEL | -| | variable | -+---------------------------+-----------------------------------------+ -| ``spp.statistic.context`` | Per-context presentation and privacy | -| | overrides | -+---------------------------+-----------------------------------------+ ++---------------------------+------------------------------------------+ +| Model | Description | ++===========================+==========================================+ +| ``spp.statistic`` | A publishable statistic linked to a CEL | +| | variable | ++---------------------------+------------------------------------------+ +| ``spp.statistic.context`` | Per-context presentation and privacy | +| | overrides | ++---------------------------+------------------------------------------+ Configuration ~~~~~~~~~~~~~ @@ -109,7 +109,7 @@ Bug Tracker Bugs are tracked on `GitHub Issues `_. In case of trouble, please check there if your issue has already been reported. If you spotted it first, help us to smash it by providing a detailed and welcomed -`feedback `_. +`feedback `_. Do not contact contributors directly about support or help with technical issues. 
@@ -135,6 +135,6 @@ Current maintainers: |maintainer-jeremi| |maintainer-gonzalesedwin1123| -This module is part of the `OpenSPP/OpenSPP2 `_ project on GitHub. +This module is part of the `OpenSPP/OpenSPP2 `_ project on GitHub. You are welcome to contribute. \ No newline at end of file diff --git a/spp_statistic/__init__.py b/spp_indicator/__init__.py similarity index 100% rename from spp_statistic/__init__.py rename to spp_indicator/__init__.py diff --git a/spp_statistic/__manifest__.py b/spp_indicator/__manifest__.py similarity index 77% rename from spp_statistic/__manifest__.py rename to spp_indicator/__manifest__.py index d26a67b7..84d45fb3 100644 --- a/spp_statistic/__manifest__.py +++ b/spp_indicator/__manifest__.py @@ -1,7 +1,7 @@ # Part of OpenSPP. See LICENSE file for full copyright and licensing details. { - "name": "OpenSPP Statistics", - "summary": "Publishable statistics based on CEL variables for dashboards, GIS, and APIs", + "name": "OpenSPP Indicator", + "summary": "Publishable indicators based on CEL variables for dashboards, GIS, and APIs", "category": "OpenSPP", "version": "19.0.2.0.0", "sequence": 1, @@ -11,13 +11,14 @@ "development_status": "Alpha", "maintainers": ["jeremi", "gonzalesedwin1123"], "depends": [ + "spp_metric", + "spp_metric_service", "spp_cel_domain", - "spp_metrics_core", "spp_security", ], "data": [ "security/ir.model.access.csv", - "data/statistic_categories.xml", + "data/indicator_categories.xml", ], "assets": {}, "demo": [], diff --git a/spp_statistic/data/statistic_categories.xml b/spp_indicator/data/indicator_categories.xml similarity index 97% rename from spp_statistic/data/statistic_categories.xml rename to spp_indicator/data/indicator_categories.xml index 466c710e..eca6fa80 100644 --- a/spp_statistic/data/statistic_categories.xml +++ b/spp_indicator/data/indicator_categories.xml @@ -5,7 +5,7 @@ Standard categories for organizing publishable statistics. These align with common social protection reporting needs. 
- Note: Uses spp.metric.category (migrated from spp.statistic.category) + Note: Uses spp.metric.category (migrated from spp.statistic.category) --> diff --git a/spp_indicator/models/__init__.py b/spp_indicator/models/__init__.py new file mode 100644 index 00000000..73f413eb --- /dev/null +++ b/spp_indicator/models/__init__.py @@ -0,0 +1,4 @@ +# Part of OpenSPP. See LICENSE file for full copyright and licensing details. +# statistic_category moved to spp_metric as metric_category +from . import indicator +from . import indicator_context diff --git a/spp_statistic/models/statistic.py b/spp_indicator/models/indicator.py similarity index 90% rename from spp_statistic/models/statistic.py rename to spp_indicator/models/indicator.py index 99f00d3c..ef7d4f42 100644 --- a/spp_statistic/models/statistic.py +++ b/spp_indicator/models/indicator.py @@ -10,22 +10,22 @@ _logger = logging.getLogger(__name__) -class Statistic(models.Model): - """A publishable statistic based on a CEL variable. +class Indicator(models.Model): + """A publishable indicator based on a CEL variable. - Statistics separate concerns: + Indicators separate concerns: - CEL Variable: "What data and how to compute it" - - Statistic: "Where to publish and how to present it" + - Indicator: "Where to publish and how to present it" - A single CEL variable can be published as multiple statistics + A single CEL variable can be published as multiple indicators with different presentations for different contexts. Inherits from spp.metric.base for common metric fields (name, label, description, category_id, sequence, etc.)
""" - _name = "spp.statistic" - _description = "Publishable Statistic" + _name = "spp.indicator" + _description = "Publishable Indicator" _inherit = ["spp.metric.base"] _order = "category_id, sequence, name" @@ -96,7 +96,7 @@ class Statistic(models.Model): # ─── Organization ─────────────────────────────────────────────────── # category_id and sequence inherited from spp.metric.base - # Note: category_id points to spp.metric.category (migrated from spp.statistic.category) + # Note: category_id points to spp.metric.category (migrated from spp.indicator.category) # ─── Publication Flags ────────────────────────────────────────────── is_published_gis = fields.Boolean( @@ -122,7 +122,7 @@ class Statistic(models.Model): # ─── Context-specific Configuration ───────────────────────────────── context_ids = fields.One2many( - comodel_name="spp.statistic.context", + comodel_name="spp.indicator.context", inverse_name="statistic_id", string="Context Configurations", help="Context-specific presentation overrides", @@ -265,24 +265,10 @@ def apply_suppression(self, value, count=None, context=None): if count is None: count = value if isinstance(value, int) else 0 - # Delegate to unified privacy service - privacy_service = self.env.get("spp.metrics.privacy") - if privacy_service is not None: - stat_config = {"minimum_count": min_count, "suppression_display": display_mode} - return privacy_service.suppress_value(value, count, stat_config=stat_config) - - # Fallback: inline suppression if service unavailable - if count < min_count: - if display_mode == "null": - return None, True - elif display_mode == "asterisk": - return "*", True - elif display_mode == "less_than": - return f"<{min_count}", True - else: - return None, True - - return value, False + # Delegate to unified privacy service (spp_metric_service is a hard dependency) + privacy_service = self.env["spp.metric.privacy"] + stat_config = {"minimum_count": min_count, "suppression_display": display_mode} + return 
privacy_service.suppress_value(value, count, stat_config=stat_config) def to_dict(self, context=None): """Convert statistic to dictionary for API/UI consumption. diff --git a/spp_statistic/models/statistic_context.py b/spp_indicator/models/indicator_context.py similarity index 91% rename from spp_statistic/models/statistic_context.py rename to spp_indicator/models/indicator_context.py index b5a7276a..a54c3309 100644 --- a/spp_statistic/models/statistic_context.py +++ b/spp_indicator/models/indicator_context.py @@ -4,21 +4,21 @@ from odoo import fields, models -class StatisticContext(models.Model): - """Context-specific presentation configuration for a statistic. +class IndicatorContext(models.Model): + """Context-specific presentation configuration for an indicator. Allows overriding default presentation settings for specific contexts - (GIS, dashboard, API, reports). For example, a statistic might use + (GIS, dashboard, API, reports). For example, an indicator might use a different label or grouping in the GIS context vs. dashboard. 
""" - _name = "spp.statistic.context" - _description = "Statistic Context Configuration" + _name = "spp.indicator.context" + _description = "Indicator Context Configuration" _order = "statistic_id, context" statistic_id = fields.Many2one( - comodel_name="spp.statistic", - string="Statistic", + comodel_name="spp.indicator", + string="Indicator", required=True, ondelete="cascade", ) diff --git a/spp_metrics_core/pyproject.toml b/spp_indicator/pyproject.toml similarity index 100% rename from spp_metrics_core/pyproject.toml rename to spp_indicator/pyproject.toml diff --git a/spp_statistic/readme/DESCRIPTION.md b/spp_indicator/readme/DESCRIPTION.md similarity index 100% rename from spp_statistic/readme/DESCRIPTION.md rename to spp_indicator/readme/DESCRIPTION.md diff --git a/spp_indicator/security/ir.model.access.csv b/spp_indicator/security/ir.model.access.csv new file mode 100644 index 00000000..dcf7f65e --- /dev/null +++ b/spp_indicator/security/ir.model.access.csv @@ -0,0 +1,5 @@ +id,name,model_id:id,group_id:id,perm_read,perm_write,perm_create,perm_unlink +access_spp_indicator_user,spp.indicator.user,model_spp_indicator,base.group_user,1,0,0,0 +access_spp_indicator_admin,spp.indicator.admin,model_spp_indicator,spp_security.group_spp_admin,1,1,1,1 +access_spp_indicator_context_user,spp.indicator.context.user,model_spp_indicator_context,base.group_user,1,0,0,0 +access_spp_indicator_context_admin,spp.indicator.context.admin,model_spp_indicator_context,spp_security.group_spp_admin,1,1,1,1 diff --git a/spp_indicator/static/description/index.html b/spp_indicator/static/description/index.html new file mode 100644 index 00000000..1e8a622f --- /dev/null +++ b/spp_indicator/static/description/index.html @@ -0,0 +1 @@ +

    spp_indicator

    diff --git a/spp_statistic/tests/__init__.py b/spp_indicator/tests/__init__.py similarity index 63% rename from spp_statistic/tests/__init__.py rename to spp_indicator/tests/__init__.py index e73e3e48..66298c00 100644 --- a/spp_statistic/tests/__init__.py +++ b/spp_indicator/tests/__init__.py @@ -1,2 +1,2 @@ # Part of OpenSPP. See LICENSE file for full copyright and licensing details. -from . import test_statistic +from . import test_coverage, test_indicator diff --git a/spp_indicator/tests/test_coverage.py b/spp_indicator/tests/test_coverage.py new file mode 100644 index 00000000..aef905f4 --- /dev/null +++ b/spp_indicator/tests/test_coverage.py @@ -0,0 +1,293 @@ +# Part of OpenSPP. See LICENSE file for full copyright and licensing details. +"""Tests for apply_suppression method coverage in spp.indicator.""" + +from odoo.tests.common import TransactionCase, tagged + + +@tagged("post_install", "-at_install") +class TestApplySuppression(TransactionCase): + """Tests for the apply_suppression k-anonymity method on spp.indicator.""" + + @classmethod + def setUpClass(cls): + """Set up shared indicator for suppression tests.""" + super().setUpClass() + + cls.cel_variable = cls.env["spp.cel.variable"].create( + { + "name": "suppression_test_var", + "cel_accessor": "suppression_test", + "source_type": "computed", + "cel_expression": "true", + "value_type": "number", + "state": "active", + } + ) + + # Default indicator: minimum_count=5, suppression_display='less_than' + cls.indicator = cls.env["spp.indicator"].create( + { + "name": "suppression_test_stat", + "label": "Suppression Test", + "variable_id": cls.cel_variable.id, + "format": "count", + "minimum_count": 5, + "suppression_display": "less_than", + } + ) + + # ─── Normal (pass-through) cases ──────────────────────────────────── + + def test_count_above_threshold_passes_through(self): + """Value is returned unchanged when count is at or above the threshold.""" + value, is_suppressed = 
self.indicator.apply_suppression(100, count=10) + + self.assertEqual(value, 100) + self.assertFalse(is_suppressed) + + def test_count_equal_to_threshold_passes_through(self): + """Value is returned unchanged when count exactly equals the threshold.""" + value, is_suppressed = self.indicator.apply_suppression(50, count=5) + + self.assertEqual(value, 50) + self.assertFalse(is_suppressed) + + # ─── Suppression cases ────────────────────────────────────────────── + + def test_count_below_threshold_is_suppressed(self): + """Value is suppressed when count is below the minimum threshold.""" + value, is_suppressed = self.indicator.apply_suppression(3, count=3) + + self.assertTrue(is_suppressed) + + def test_count_zero_is_suppressed(self): + """A count of zero is below the threshold and must be suppressed.""" + value, is_suppressed = self.indicator.apply_suppression(0, count=0) + + self.assertTrue(is_suppressed) + + def test_count_one_is_suppressed(self): + """A count of one is below the default threshold of 5.""" + value, is_suppressed = self.indicator.apply_suppression(1, count=1) + + self.assertTrue(is_suppressed) + + # ─── Suppression display modes ────────────────────────────────────── + + def test_suppression_display_less_than(self): + """less_than mode returns '<{threshold}' as display value.""" + indicator = self.env["spp.indicator"].create( + { + "name": "less_than_mode_stat", + "label": "Less Than Mode", + "variable_id": self.cel_variable.id, + "minimum_count": 5, + "suppression_display": "less_than", + } + ) + + display_value, is_suppressed = indicator.apply_suppression(3, count=3) + + self.assertTrue(is_suppressed) + self.assertEqual(display_value, "<5") + + def test_suppression_display_asterisk(self): + """asterisk mode returns '*' as display value.""" + indicator = self.env["spp.indicator"].create( + { + "name": "asterisk_mode_stat", + "label": "Asterisk Mode", + "variable_id": self.cel_variable.id, + "minimum_count": 5, + "suppression_display": "asterisk", + 
} + ) + + display_value, is_suppressed = indicator.apply_suppression(3, count=3) + + self.assertTrue(is_suppressed) + self.assertEqual(display_value, "*") + + def test_suppression_display_null(self): + """null mode returns None as display value.""" + indicator = self.env["spp.indicator"].create( + { + "name": "null_mode_stat", + "label": "Null Mode", + "variable_id": self.cel_variable.id, + "minimum_count": 5, + "suppression_display": "null", + } + ) + + display_value, is_suppressed = indicator.apply_suppression(3, count=3) + + self.assertTrue(is_suppressed) + self.assertIsNone(display_value) + + # ─── Return tuple format ──────────────────────────────────────────── + + def test_return_value_is_tuple(self): + """apply_suppression always returns a 2-tuple.""" + result = self.indicator.apply_suppression(100, count=10) + + self.assertIsInstance(result, tuple) + self.assertEqual(len(result), 2) + + def test_return_tuple_unsuppressed_types(self): + """Unsuppressed result has original value type and bool False.""" + display_value, is_suppressed = self.indicator.apply_suppression(42, count=10) + + self.assertEqual(display_value, 42) + self.assertIsInstance(is_suppressed, bool) + self.assertFalse(is_suppressed) + + def test_return_tuple_suppressed_types(self): + """Suppressed result has string/None display value and bool True.""" + display_value, is_suppressed = self.indicator.apply_suppression(3, count=3) + + self.assertIsInstance(is_suppressed, bool) + self.assertTrue(is_suppressed) + + # ─── None count edge cases ────────────────────────────────────────── + + def test_none_count_uses_int_value_as_count(self): + """When count is None and value is int, value is used as count.""" + # value=3 is an int below threshold=5, so should be suppressed + value, is_suppressed = self.indicator.apply_suppression(3) + + self.assertTrue(is_suppressed) + + def test_none_count_with_high_int_value_passes_through(self): + """When count is None and value is int above threshold, no 
suppression.""" + value, is_suppressed = self.indicator.apply_suppression(100) + + self.assertEqual(value, 100) + self.assertFalse(is_suppressed) + + def test_none_count_with_non_int_value_uses_zero(self): + """When count is None and value is not an int, count defaults to 0. + + 0 is below the threshold of 5, so the value is suppressed. + """ + value, is_suppressed = self.indicator.apply_suppression(3.14) + + self.assertTrue(is_suppressed) + + def test_none_count_with_string_value_uses_zero(self): + """When count is None and value is a string, count defaults to 0.""" + value, is_suppressed = self.indicator.apply_suppression("some_value") + + self.assertTrue(is_suppressed) + + # ─── Context-specific minimum_count override ──────────────────────── + + def test_context_minimum_count_override_suppresses_with_higher_threshold(self): + """A context record with higher minimum_count suppresses values the default would pass.""" + indicator = self.env["spp.indicator"].create( + { + "name": "ctx_threshold_stat", + "label": "Context Threshold", + "variable_id": self.cel_variable.id, + "minimum_count": 5, + "suppression_display": "less_than", + } + ) + self.env["spp.indicator.context"].create( + { + "statistic_id": indicator.id, + "context": "gis", + "minimum_count": 20, + } + ) + + # count=8 passes the default threshold of 5 but not the GIS override of 20 + value, is_suppressed = indicator.apply_suppression(8, count=8, context="gis") + + self.assertTrue(is_suppressed) + + def test_context_minimum_count_override_passes_when_above_override_threshold(self): + """Values above the context-specific threshold are not suppressed.""" + indicator = self.env["spp.indicator"].create( + { + "name": "ctx_threshold_pass_stat", + "label": "Context Threshold Pass", + "variable_id": self.cel_variable.id, + "minimum_count": 5, + "suppression_display": "less_than", + } + ) + self.env["spp.indicator.context"].create( + { + "statistic_id": indicator.id, + "context": "gis", + "minimum_count": 10, + 
} + ) + + # count=15 exceeds the GIS override of 10 + value, is_suppressed = indicator.apply_suppression(15, count=15, context="gis") + + self.assertEqual(value, 15) + self.assertFalse(is_suppressed) + + def test_no_context_record_falls_back_to_indicator_defaults(self): + """When no context record exists, indicator-level defaults are used.""" + indicator = self.env["spp.indicator"].create( + { + "name": "no_ctx_fallback_stat", + "label": "No Context Fallback", + "variable_id": self.cel_variable.id, + "minimum_count": 5, + "suppression_display": "asterisk", + } + ) + # No context record created — should use indicator minimum_count=5 + display_value, is_suppressed = indicator.apply_suppression(3, count=3, context="dashboard") + + self.assertTrue(is_suppressed) + self.assertEqual(display_value, "*") + + def test_context_none_uses_indicator_defaults_directly(self): + """When context=None, indicator-level config is used without lookup.""" + indicator = self.env["spp.indicator"].create( + { + "name": "null_ctx_stat", + "label": "Null Context", + "variable_id": self.cel_variable.id, + "minimum_count": 10, + "suppression_display": "null", + } + ) + + # count=7 is below minimum_count=10 + display_value, is_suppressed = indicator.apply_suppression(7, count=7) + + self.assertTrue(is_suppressed) + self.assertIsNone(display_value) + + # ─── Threshold boundary with context ──────────────────────────────── + + def test_context_less_than_display_shows_context_threshold(self): + """less_than display uses the context-specific threshold value.""" + indicator = self.env["spp.indicator"].create( + { + "name": "ctx_lt_display_stat", + "label": "Context Less Than", + "variable_id": self.cel_variable.id, + "minimum_count": 5, + "suppression_display": "less_than", + } + ) + self.env["spp.indicator.context"].create( + { + "statistic_id": indicator.id, + "context": "api", + "minimum_count": 15, + } + ) + + display_value, is_suppressed = indicator.apply_suppression(8, count=8, context="api") 
+ + self.assertTrue(is_suppressed) + self.assertEqual(display_value, "<15") diff --git a/spp_statistic/tests/test_statistic.py b/spp_indicator/tests/test_indicator.py similarity index 95% rename from spp_statistic/tests/test_statistic.py rename to spp_indicator/tests/test_indicator.py index a99a5484..7d2847c2 100644 --- a/spp_statistic/tests/test_statistic.py +++ b/spp_indicator/tests/test_indicator.py @@ -8,7 +8,7 @@ class TestStatisticCategory(TransactionCase): """Test statistic category model integration. - Note: Category model tests are in spp_metrics_core/tests/test_metric_category.py + Note: Category model tests are in spp_metric/tests/test_metric_category.py This class tests the integration between statistics and categories. """ @@ -46,7 +46,7 @@ def test_statistic_uses_metric_category(self): } ) - stat = self.env["spp.statistic"].create( + stat = self.env["spp.indicator"].create( { "name": "test_stat", "label": "Test Stat", @@ -91,7 +91,7 @@ def setUpClass(cls): def test_create_statistic(self): """Test creating a statistic.""" - Statistic = self.env["spp.statistic"] + Statistic = self.env["spp.indicator"] stat = Statistic.create( { @@ -113,7 +113,7 @@ def test_create_statistic(self): def test_name_format_validation(self): """Test that statistic names must be snake_case.""" - Statistic = self.env["spp.statistic"] + Statistic = self.env["spp.indicator"] # Valid names stat = Statistic.create( @@ -147,7 +147,7 @@ def test_name_format_validation(self): def test_get_published_for_context(self): """Test querying statistics by context.""" - Statistic = self.env["spp.statistic"] + Statistic = self.env["spp.indicator"] # Create statistics with different publication flags stat_gis = Statistic.create( @@ -190,7 +190,7 @@ def test_get_published_for_context(self): def test_get_published_by_category(self): """Test grouping statistics by category.""" - Statistic = self.env["spp.statistic"] + Statistic = self.env["spp.indicator"] cat2 = 
self.env["spp.metric.category"].create({"name": "Second", "code": "second"}) @@ -220,7 +220,7 @@ def test_get_published_by_category(self): def test_to_dict(self): """Test dictionary conversion for API.""" - Statistic = self.env["spp.statistic"] + Statistic = self.env["spp.indicator"] stat = Statistic.create( { @@ -262,7 +262,7 @@ def setUpClass(cls): } ) - cls.statistic = cls.env["spp.statistic"].create( + cls.statistic = cls.env["spp.indicator"].create( { "name": "context_test_stat", "label": "Default Label", @@ -275,7 +275,7 @@ def setUpClass(cls): def test_context_override(self): """Test that context-specific config overrides defaults.""" - Context = self.env["spp.statistic.context"] + Context = self.env["spp.indicator.context"] # Create GIS-specific override Context.create( @@ -301,7 +301,7 @@ def test_context_override(self): def test_context_unique_constraint(self): """Test that each statistic can only have one config per context.""" - Context = self.env["spp.statistic.context"] + Context = self.env["spp.indicator.context"] Context.create( { diff --git a/spp_statistic_studio/README.rst b/spp_indicator_studio/README.rst similarity index 90% rename from spp_statistic_studio/README.rst rename to spp_indicator_studio/README.rst index 7c50c68a..eb603155 100644 --- a/spp_statistic_studio/README.rst +++ b/spp_indicator_studio/README.rst @@ -1,6 +1,6 @@ -========================= -OpenSPP Statistics Studio -========================= +======================== +OpenSPP Indicator Studio +======================== .. !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! @@ -17,7 +17,7 @@ OpenSPP Statistics Studio :target: http://www.gnu.org/licenses/lgpl-3.0-standalone.html :alt: License: LGPL-3 .. 
|badge3| image:: https://img.shields.io/badge/github-OpenSPP%2FOpenSPP2-lightgray.png?logo=github - :target: https://github.com/OpenSPP/OpenSPP2/tree/19.0/spp_statistic_studio + :target: https://github.com/OpenSPP/OpenSPP2/tree/19.0/spp_indicator_studio :alt: OpenSPP/OpenSPP2 |badge1| |badge2| |badge3| @@ -72,7 +72,7 @@ Bug Tracker Bugs are tracked on `GitHub Issues `_. In case of trouble, please check there if your issue has already been reported. If you spotted it first, help us to smash it by providing a detailed and welcomed -`feedback `_. +`feedback `_. Do not contact contributors directly about support or help with technical issues. @@ -87,6 +87,6 @@ Authors Maintainers ----------- -This module is part of the `OpenSPP/OpenSPP2 `_ project on GitHub. +This module is part of the `OpenSPP/OpenSPP2 `_ project on GitHub. You are welcome to contribute. \ No newline at end of file diff --git a/spp_statistic_studio/__init__.py b/spp_indicator_studio/__init__.py similarity index 100% rename from spp_statistic_studio/__init__.py rename to spp_indicator_studio/__init__.py diff --git a/spp_statistic_studio/__manifest__.py b/spp_indicator_studio/__manifest__.py similarity index 62% rename from spp_statistic_studio/__manifest__.py rename to spp_indicator_studio/__manifest__.py index 58792641..9b11a91f 100644 --- a/spp_statistic_studio/__manifest__.py +++ b/spp_indicator_studio/__manifest__.py @@ -1,23 +1,23 @@ # Part of OpenSPP. See LICENSE file for full copyright and licensing details. 
{ - "name": "OpenSPP Statistics Studio", + "name": "OpenSPP Indicator Studio", "version": "19.0.2.0.0", "category": "OpenSPP/Configuration", - "summary": "Studio UI for managing publishable statistics", + "summary": "Studio UI for managing publishable indicators", "author": "OpenSPP.org", "website": "https://github.com/OpenSPP/OpenSPP2", "license": "LGPL-3", "depends": [ - "spp_statistic", + "spp_indicator", "spp_studio", ], "data": [ "security/ir.model.access.csv", - "views/statistic_views.xml", - "views/statistic_category_views.xml", + "views/indicator_views.xml", + "views/indicator_category_views.xml", "views/menus.xml", ], "installable": True, - # Bridge module: auto-install when both spp_statistic and spp_studio are present + # Bridge module: auto-install when both spp_indicator and spp_studio are present "auto_install": True, } diff --git a/spp_metrics_services/pyproject.toml b/spp_indicator_studio/pyproject.toml similarity index 100% rename from spp_metrics_services/pyproject.toml rename to spp_indicator_studio/pyproject.toml diff --git a/spp_statistic_studio/readme/DESCRIPTION.md b/spp_indicator_studio/readme/DESCRIPTION.md similarity index 100% rename from spp_statistic_studio/readme/DESCRIPTION.md rename to spp_indicator_studio/readme/DESCRIPTION.md diff --git a/spp_indicator_studio/security/ir.model.access.csv b/spp_indicator_studio/security/ir.model.access.csv new file mode 100644 index 00000000..bd31a343 --- /dev/null +++ b/spp_indicator_studio/security/ir.model.access.csv @@ -0,0 +1,4 @@ +id,name,model_id:id,group_id:id,perm_read,perm_write,perm_create,perm_unlink +access_spp_indicator_studio_admin,spp.indicator studio admin,spp_indicator.model_spp_indicator,spp_studio.group_studio_manager,1,1,1,1 +access_spp_metric_category_studio_admin,spp.metric.category studio admin,spp_metric.model_spp_metric_category,spp_studio.group_studio_manager,1,1,1,1 +access_spp_indicator_context_studio_admin,spp.indicator.context studio 
admin,spp_indicator.model_spp_indicator_context,spp_studio.group_studio_manager,1,1,1,1 diff --git a/spp_indicator_studio/static/description/index.html b/spp_indicator_studio/static/description/index.html new file mode 100644 index 00000000..5f68e691 --- /dev/null +++ b/spp_indicator_studio/static/description/index.html @@ -0,0 +1 @@ +

    spp_indicator_studio

    diff --git a/spp_statistic_studio/views/statistic_category_views.xml b/spp_indicator_studio/views/indicator_category_views.xml similarity index 97% rename from spp_statistic_studio/views/statistic_category_views.xml rename to spp_indicator_studio/views/indicator_category_views.xml index 38cb74dd..9eb40d23 100644 --- a/spp_statistic_studio/views/statistic_category_views.xml +++ b/spp_indicator_studio/views/indicator_category_views.xml @@ -1,8 +1,8 @@ diff --git a/spp_statistic_studio/views/statistic_views.xml b/spp_indicator_studio/views/indicator_views.xml similarity index 92% rename from spp_statistic_studio/views/statistic_views.xml rename to spp_indicator_studio/views/indicator_views.xml index 14e30147..f5a2e35e 100644 --- a/spp_statistic_studio/views/statistic_views.xml +++ b/spp_indicator_studio/views/indicator_views.xml @@ -1,15 +1,15 @@ - spp.statistic.view.list - spp.statistic + spp.indicator.view.list + spp.indicator - + @@ -29,10 +29,10 @@ - spp.statistic.view.form - spp.statistic + spp.indicator.view.form + spp.indicator - + - spp.statistic.view.search - spp.statistic + spp.indicator.view.search + spp.indicator - + @@ -192,8 +192,8 @@ - spp.statistic.view.kanban - spp.statistic + spp.indicator.view.kanban + spp.indicator @@ -238,18 +238,18 @@ - Statistics - spp.statistic + Indicators + spp.indicator list,kanban,form {'search_default_group_category': 1}

    - Create your first statistic + Create your first indicator

    - Statistics define what data to publish and where. - Each statistic is linked to a CEL variable that defines the computation. + Indicators define what data to publish and where. + Each indicator is linked to a CEL variable that defines the computation.

    diff --git a/spp_statistic_studio/views/menus.xml b/spp_indicator_studio/views/menus.xml similarity index 72% rename from spp_statistic_studio/views/menus.xml rename to spp_indicator_studio/views/menus.xml index b0434f04..01f45d15 100644 --- a/spp_statistic_studio/views/menus.xml +++ b/spp_indicator_studio/views/menus.xml @@ -1,31 +1,31 @@ - + - + - + my_metric My Custom Metric - +
    ``` @@ -169,8 +169,8 @@ See [Migration Guide](../../docs/migration/statistics-refactoring.md) for detail ## Used By -- `spp_metrics_services` - Aggregation and computation services -- `spp_statistic` - Published statistics +- `spp_metric_service` - Computation services +- `spp_indicator` - Publishable indicators - `spp_simulation` - Simulation metrics - Domain modules with custom metrics diff --git a/spp_metrics_core/README.rst b/spp_metric/README.rst similarity index 93% rename from spp_metrics_core/README.rst rename to spp_metric/README.rst index 30fd2ef3..cf858df1 100644 --- a/spp_metrics_core/README.rst +++ b/spp_metric/README.rst @@ -1,6 +1,6 @@ -==================== -OpenSPP Metrics Core -==================== +============== +OpenSPP Metric +============== .. !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! @@ -17,7 +17,7 @@ OpenSPP Metrics Core :target: http://www.gnu.org/licenses/lgpl-3.0-standalone.html :alt: License: LGPL-3 .. |badge3| image:: https://img.shields.io/badge/github-OpenSPP%2FOpenSPP2-lightgray.png?logo=github - :target: https://github.com/OpenSPP/OpenSPP2/tree/19.0/spp_metrics_core + :target: https://github.com/OpenSPP/OpenSPP2/tree/19.0/spp_metric :alt: OpenSPP/OpenSPP2 |badge1| |badge2| |badge3| @@ -96,7 +96,7 @@ Bug Tracker Bugs are tracked on `GitHub Issues `_. In case of trouble, please check there if your issue has already been reported. If you spotted it first, help us to smash it by providing a detailed and welcomed -`feedback `_. +`feedback `_. Do not contact contributors directly about support or help with technical issues. @@ -122,6 +122,6 @@ Current maintainers: |maintainer-jeremi| |maintainer-gonzalesedwin1123| -This module is part of the `OpenSPP/OpenSPP2 `_ project on GitHub. +This module is part of the `OpenSPP/OpenSPP2 `_ project on GitHub. You are welcome to contribute. 
\ No newline at end of file diff --git a/spp_metrics_core/__init__.py b/spp_metric/__init__.py similarity index 100% rename from spp_metrics_core/__init__.py rename to spp_metric/__init__.py diff --git a/spp_metrics_core/__manifest__.py b/spp_metric/__manifest__.py similarity index 86% rename from spp_metrics_core/__manifest__.py rename to spp_metric/__manifest__.py index 590500d6..7f6d2b0c 100644 --- a/spp_metrics_core/__manifest__.py +++ b/spp_metric/__manifest__.py @@ -1,7 +1,7 @@ # Part of OpenSPP. See LICENSE file for full copyright and licensing details. { - "name": "OpenSPP Metrics Core", - "summary": "Unified metric foundation for statistics and simulations", + "name": "OpenSPP Metric", + "summary": "Unified metric foundation for indicators and simulations", "category": "OpenSPP", "version": "19.0.2.0.0", "sequence": 1, diff --git a/spp_metrics_core/data/metric_categories.xml b/spp_metric/data/metric_categories.xml similarity index 100% rename from spp_metrics_core/data/metric_categories.xml rename to spp_metric/data/metric_categories.xml diff --git a/spp_metrics_core/migrations/19.0.1.0.0/pre-migrate.py b/spp_metric/migrations/19.0.1.0.0/pre-migrate.py similarity index 100% rename from spp_metrics_core/migrations/19.0.1.0.0/pre-migrate.py rename to spp_metric/migrations/19.0.1.0.0/pre-migrate.py diff --git a/spp_metrics_core/models/__init__.py b/spp_metric/models/__init__.py similarity index 100% rename from spp_metrics_core/models/__init__.py rename to spp_metric/models/__init__.py diff --git a/spp_metrics_core/models/metric_base.py b/spp_metric/models/metric_base.py similarity index 96% rename from spp_metrics_core/models/metric_base.py rename to spp_metric/models/metric_base.py index 6c63e25b..59df2063 100644 --- a/spp_metrics_core/models/metric_base.py +++ b/spp_metric/models/metric_base.py @@ -24,11 +24,11 @@ class MetricBase(models.AbstractModel): ----- Concrete models inherit this to avoid field duplication: - class Statistic(models.Model): - _name 
= "spp.statistic" + class Indicator(models.Model): + _name = "spp.indicator" _inherit = ["spp.metric.base"] - # Add statistic-specific fields + # Add indicator-specific fields variable_id = fields.Many2one(...) format = fields.Selection([("count", "Count"), ...]) is_published_gis = fields.Boolean() diff --git a/spp_metrics_core/models/metric_category.py b/spp_metric/models/metric_category.py similarity index 98% rename from spp_metrics_core/models/metric_category.py rename to spp_metric/models/metric_category.py index 2b3541f3..948b42b1 100644 --- a/spp_metrics_core/models/metric_category.py +++ b/spp_metric/models/metric_category.py @@ -13,7 +13,7 @@ class MetricCategory(models.Model): """Categorization for metrics. Provides unified category management for all metric types: - - Statistics (spp.statistic) + - Indicators (spp.indicator) - Simulation metrics (spp.simulation.metric) - Future metric types diff --git a/spp_statistic/pyproject.toml b/spp_metric/pyproject.toml similarity index 100% rename from spp_statistic/pyproject.toml rename to spp_metric/pyproject.toml diff --git a/spp_metrics_core/readme/DESCRIPTION.md b/spp_metric/readme/DESCRIPTION.md similarity index 100% rename from spp_metrics_core/readme/DESCRIPTION.md rename to spp_metric/readme/DESCRIPTION.md diff --git a/spp_metrics_core/security/ir.model.access.csv b/spp_metric/security/ir.model.access.csv similarity index 100% rename from spp_metrics_core/security/ir.model.access.csv rename to spp_metric/security/ir.model.access.csv diff --git a/spp_metrics_core/static/description/icon.png b/spp_metric/static/description/icon.png similarity index 100% rename from spp_metrics_core/static/description/icon.png rename to spp_metric/static/description/icon.png diff --git a/spp_metric/static/description/index.html b/spp_metric/static/description/index.html new file mode 100644 index 00000000..0b464c28 --- /dev/null +++ b/spp_metric/static/description/index.html @@ -0,0 +1 @@ +

    spp_metric

    diff --git a/spp_metrics_core/tests/__init__.py b/spp_metric/tests/__init__.py similarity index 100% rename from spp_metrics_core/tests/__init__.py rename to spp_metric/tests/__init__.py diff --git a/spp_metrics_core/tests/test_coverage.py b/spp_metric/tests/test_coverage.py similarity index 100% rename from spp_metrics_core/tests/test_coverage.py rename to spp_metric/tests/test_coverage.py diff --git a/spp_metrics_core/tests/test_metric_base.py b/spp_metric/tests/test_metric_base.py similarity index 75% rename from spp_metrics_core/tests/test_metric_base.py rename to spp_metric/tests/test_metric_base.py index edfbe0cb..68b89b7c 100644 --- a/spp_metrics_core/tests/test_metric_base.py +++ b/spp_metric/tests/test_metric_base.py @@ -21,13 +21,13 @@ def setUpClass(cls): def test_metric_base_fields_exist(self): """Test that base model shared fields are defined.""" - # Skip if spp_statistic is not installed - if "spp.statistic" not in self.env: - self.skipTest("spp_statistic module not installed") + # Skip if spp_indicator is not installed + if "spp.indicator" not in self.env: + self.skipTest("spp_indicator module not installed") # Get a concrete model that inherits from metric.base - # We'll use spp.statistic which should inherit from it - fields = self.env["spp.statistic"]._fields + # We'll use spp.indicator which should inherit from it + fields = self.env["spp.indicator"]._fields # Identity fields (from base) self.assertIn("name", fields, "name field should exist") @@ -50,27 +50,27 @@ def test_metric_base_fields_exist(self): # Note: metric_type, cel_expression, aggregation, format are NOT in base # They are defined by concrete models with model-specific selections - def test_metric_base_inherited_by_statistic(self): - """Test that spp.statistic inherits from spp.metric.base.""" - # Skip if spp_statistic is not installed - if "spp.statistic" not in self.env: - self.skipTest("spp_statistic module not installed") + def test_metric_base_inherited_by_indicator(self): + 
"""Test that spp.indicator inherits from spp.metric.base.""" + # Skip if spp_indicator is not installed + if "spp.indicator" not in self.env: + self.skipTest("spp_indicator module not installed") # Check if spp.metric.base is in the inheritance chain - stat_model = self.env["spp.statistic"] + stat_model = self.env["spp.indicator"] self.assertIn( "spp.metric.base", stat_model._inherit if isinstance(stat_model._inherit, list) else [stat_model._inherit], - "spp.statistic should inherit from spp.metric.base", + "spp.indicator should inherit from spp.metric.base", ) def test_metric_base_default_values(self): """Test default field values from base model.""" - # Skip if spp_statistic is not installed - if "spp.statistic" not in self.env: - self.skipTest("spp_statistic module not installed") + # Skip if spp_indicator is not installed + if "spp.indicator" not in self.env: + self.skipTest("spp_indicator module not installed") - # Create a minimal statistic to test defaults + # Create a minimal indicator to test defaults # We need a CEL variable first variable = self.env["spp.cel.variable"].create( { @@ -82,7 +82,7 @@ def test_metric_base_default_values(self): } ) - stat = self.env["spp.statistic"].create( + stat = self.env["spp.indicator"].create( { "name": "test_metric", "label": "Test Metric", @@ -97,9 +97,9 @@ def test_metric_base_default_values(self): def test_metric_base_category_assignment(self): """Test that metrics can be assigned to categories.""" - # Skip if spp_statistic is not installed - if "spp.statistic" not in self.env: - self.skipTest("spp_statistic module not installed") + # Skip if spp_indicator is not installed + if "spp.indicator" not in self.env: + self.skipTest("spp_indicator module not installed") variable = self.env["spp.cel.variable"].create( { @@ -111,7 +111,7 @@ def test_metric_base_category_assignment(self): } ) - stat = self.env["spp.statistic"].create( + stat = self.env["spp.indicator"].create( { "name": "test_metric_2", "label": "Test Metric 
2", diff --git a/spp_metrics_core/tests/test_metric_category.py b/spp_metric/tests/test_metric_category.py similarity index 100% rename from spp_metrics_core/tests/test_metric_category.py rename to spp_metric/tests/test_metric_category.py diff --git a/spp_metrics_core/tests/test_migration.py b/spp_metric/tests/test_migration.py similarity index 100% rename from spp_metrics_core/tests/test_migration.py rename to spp_metric/tests/test_migration.py diff --git a/spp_metrics_services/README.md b/spp_metric_service/README.md similarity index 80% rename from spp_metrics_services/README.md rename to spp_metric_service/README.md index cfd475bb..916515f0 100644 --- a/spp_metrics_services/README.md +++ b/spp_metric_service/README.md @@ -1,28 +1,28 @@ -# OpenSPP Metrics Services +# OpenSPP Metric Service -Shared computation and caching services for metrics across OpenSPP modules. +Computation services for metrics across OpenSPP modules. ## Overview -`spp_metrics_services` provides the core computation engine for all metrics in OpenSPP, +`spp_metric_service` provides the core computation engine for all metrics in OpenSPP, including population statistics, simulation outcomes, fairness analysis, and privacy protection. These services are used by GIS, dashboards, simulations, and APIs. 
## Architecture ``` -spp.aggregation.service (Main Entry Point) +spp.analytics.service (Main Entry Point) │ - ├── spp.metrics.breakdown (Multi-dimensional grouping) - │ └── spp.metrics.dimension.cache (Performance optimization) - ├── spp.metrics.fairness (Equity analysis) - ├── spp.metrics.distribution (Statistical distributions) - └── spp.metrics.privacy (K-anonymity enforcement) + ├── spp.metric.breakdown (Multi-dimensional grouping) + │ └── spp.metric.dimension.cache (Performance optimization) + ├── spp.metric.fairness (Equity analysis) + ├── spp.metric.distribution (Statistical distributions) + └── spp.metric.privacy (K-anonymity enforcement) ``` ## Services -### spp.aggregation.service +### spp.analytics.service **Main entry point** for all aggregation computations. @@ -65,7 +65,7 @@ compute_aggregation(scope, statistics=None, group_by=None, context=None) **Example:** ```python -service = env['spp.aggregation.service'] +service = env['spp.analytics.service'] scope = { 'scope_type': 'area', @@ -80,7 +80,7 @@ result = service.compute_aggregation( ) ``` -### spp.metrics.breakdown +### spp.metric.breakdown Computes multi-dimensional breakdowns with caching. @@ -93,13 +93,13 @@ compute_breakdown(registrant_ids, group_by, statistics=None, context=None) **Features:** - Supports up to 3 simultaneous dimensions -- Automatic caching via `spp.metrics.dimension.cache` +- Automatic caching via `spp.metric.dimension.cache` - Privacy enforcement on small groups **Example:** ```python -breakdown_service = env['spp.metrics.breakdown'] +breakdown_service = env['spp.metric.breakdown'] result = breakdown_service.compute_breakdown( registrant_ids=[1, 2, 3, 4, 5], @@ -109,7 +109,7 @@ result = breakdown_service.compute_breakdown( ) ``` -### spp.metrics.fairness +### spp.metric.fairness Computes fairness/equity metrics across demographic groups. 
@@ -134,7 +134,7 @@ compute_fairness(registrant_ids, base_domain=None, dimensions=None) **Example:** ```python -fairness_service = env['spp.metrics.fairness'] +fairness_service = env['spp.metric.fairness'] result = fairness_service.compute_fairness( registrant_ids=[1, 2, 3], @@ -155,7 +155,7 @@ result = fairness_service.compute_fairness( # } ``` -### spp.metrics.distribution +### spp.metric.distribution Computes distribution statistics for numerical values. @@ -175,7 +175,7 @@ compute_distribution(amounts) **Example:** ```python -distribution_service = env['spp.metrics.distribution'] +distribution_service = env['spp.metric.distribution'] amounts = [100, 200, 150, 300, 250] stats = distribution_service.compute_distribution(amounts) @@ -190,7 +190,7 @@ stats = distribution_service.compute_distribution(amounts) # } ``` -### spp.metrics.privacy +### spp.metric.privacy Enforces k-anonymity privacy protection on aggregation results. @@ -198,8 +198,7 @@ Enforces k-anonymity privacy protection on aggregation results. ```python enforce(result, k_threshold=None, access_level="aggregate") -validate_access_level(user=None) -get_k_threshold(user=None, context=None) +suppress_value(value, count, k_threshold=None, stat_config=None) ``` **Features:** @@ -212,7 +211,7 @@ get_k_threshold(user=None, context=None) **Example:** ```python -privacy_service = env['spp.metrics.privacy'] +privacy_service = env['spp.metric.privacy'] result = { 'total_count': 3, # Below threshold @@ -230,7 +229,7 @@ protected = privacy_service.enforce(result, k_threshold=10) # } ``` -### spp.metrics.dimension.cache +### spp.metric.dimension.cache Performance cache for dimension evaluations. 
@@ -266,32 +265,31 @@ result = breakdown.compute_breakdown(dimension_ids, registrant_ids) # Re-evalua ## Dependencies - `base` - Odoo core -- `spp_metrics_core` - Base metric models +- `spp_metric` - Base metric models - `spp_cel_domain` - CEL expression support - `spp_area` - Administrative areas - `spp_registry` - Registrant/partner data ## Used By -- `spp_aggregation` - Delegates to these services -- `spp_statistic` - Statistics computation +- `spp_analytics` - Delegates to these services +- `spp_indicator` - Indicator computation - `spp_simulation` - Simulation metrics - `spp_api_v2_gis` - GIS statistics API - `spp_api_v2_simulation` - Simulation API -## Migration from spp_aggregation +## Migration from spp_analytics -These services were extracted from `spp_aggregation` to enable reuse across modules. -Model names remain unchanged for backward compatibility. +These services were extracted from `spp_analytics` to enable reuse across modules. **No code changes required** - Existing code continues to work: ```python # Still works -fairness = env['spp.metrics.fairness'] -distribution = env['spp.metrics.distribution'] -privacy = env['spp.metrics.privacy'] -breakdown = env['spp.metrics.breakdown'] +fairness = env['spp.metric.fairness'] +distribution = env['spp.metric.distribution'] +privacy = env['spp.metric.privacy'] +breakdown = env['spp.metric.breakdown'] ``` See [Migration Guide](../../docs/migration/statistics-refactoring.md) for details. 
@@ -333,7 +331,7 @@ K-anonymity enforcement adds minimal overhead: Run tests: ```bash -./scripts/test_single_module.sh spp_metrics_services +./scripts/test_single_module.sh spp_metric_service ``` Key test scenarios: diff --git a/spp_metrics_services/README.rst b/spp_metric_service/README.rst similarity index 94% rename from spp_metrics_services/README.rst rename to spp_metric_service/README.rst index ef2ed5d1..b38d50bb 100644 --- a/spp_metrics_services/README.rst +++ b/spp_metric_service/README.rst @@ -1,6 +1,6 @@ -======================== -OpenSPP Metrics Services -======================== +====================== +OpenSPP Metric Service +====================== .. !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! @@ -17,7 +17,7 @@ OpenSPP Metrics Services :target: http://www.gnu.org/licenses/lgpl-3.0-standalone.html :alt: License: LGPL-3 .. |badge3| image:: https://img.shields.io/badge/github-OpenSPP%2FOpenSPP2-lightgray.png?logo=github - :target: https://github.com/OpenSPP/OpenSPP2/tree/19.0/spp_metrics_services + :target: https://github.com/OpenSPP/OpenSPP2/tree/19.0/spp_metric_service :alt: OpenSPP/OpenSPP2 |badge1| |badge2| |badge3| @@ -118,7 +118,7 @@ Bug Tracker Bugs are tracked on `GitHub Issues `_. In case of trouble, please check there if your issue has already been reported. If you spotted it first, help us to smash it by providing a detailed and welcomed -`feedback `_. +`feedback `_. Do not contact contributors directly about support or help with technical issues. @@ -141,6 +141,6 @@ Current maintainer: |maintainer-jeremi| -This module is part of the `OpenSPP/OpenSPP2 `_ project on GitHub. +This module is part of the `OpenSPP/OpenSPP2 `_ project on GitHub. You are welcome to contribute. 
\ No newline at end of file diff --git a/spp_metrics_services/__init__.py b/spp_metric_service/__init__.py similarity index 100% rename from spp_metrics_services/__init__.py rename to spp_metric_service/__init__.py diff --git a/spp_metrics_services/__manifest__.py b/spp_metric_service/__manifest__.py similarity index 84% rename from spp_metrics_services/__manifest__.py rename to spp_metric_service/__manifest__.py index 110f1290..e77c4ce9 100644 --- a/spp_metrics_services/__manifest__.py +++ b/spp_metric_service/__manifest__.py @@ -1,7 +1,7 @@ # Part of OpenSPP. See LICENSE file for full copyright and licensing details. { - "name": "OpenSPP Metrics Services", - "summary": "Shared services for fairness, distribution, breakdown, and privacy", + "name": "OpenSPP Metric Service", + "summary": "Computation services for fairness, distribution, breakdown, and privacy", "category": "OpenSPP", "version": "19.0.2.0.0", "sequence": 1, diff --git a/spp_metrics_services/data/demographic_dimensions.xml b/spp_metric_service/data/demographic_dimensions.xml similarity index 100% rename from spp_metrics_services/data/demographic_dimensions.xml rename to spp_metric_service/data/demographic_dimensions.xml diff --git a/spp_metrics_services/models/__init__.py b/spp_metric_service/models/__init__.py similarity index 100% rename from spp_metrics_services/models/__init__.py rename to spp_metric_service/models/__init__.py diff --git a/spp_metrics_services/models/breakdown_service.py b/spp_metric_service/models/breakdown_service.py similarity index 97% rename from spp_metrics_services/models/breakdown_service.py rename to spp_metric_service/models/breakdown_service.py index 5f9b978e..acb1524b 100644 --- a/spp_metrics_services/models/breakdown_service.py +++ b/spp_metric_service/models/breakdown_service.py @@ -14,7 +14,7 @@ class BreakdownService(models.AbstractModel): counts and statistics per dimension combination. 
""" - _name = "spp.metrics.breakdown" + _name = "spp.metric.breakdown" _description = "Breakdown Computation Service" @api.model @@ -57,7 +57,7 @@ def compute_breakdown(self, registrant_ids, group_by, statistics=None, context=N return {} # Get cache service - cache_service = self.env["spp.metrics.dimension.cache"] + cache_service = self.env["spp.metric.dimension.cache"] # Get cached evaluations for all dimensions dimension_evaluations = {} diff --git a/spp_metrics_services/models/demographic_dimension.py b/spp_metric_service/models/demographic_dimension.py similarity index 98% rename from spp_metrics_services/models/demographic_dimension.py rename to spp_metric_service/models/demographic_dimension.py index 95fad6ef..03900eb7 100644 --- a/spp_metrics_services/models/demographic_dimension.py +++ b/spp_metric_service/models/demographic_dimension.py @@ -266,14 +266,14 @@ def get_active_dimensions(self, applies_to=None): def write(self, vals): """Clear cache when dimension configuration changes.""" result = super().write(vals) - cache_service = self.env["spp.metrics.dimension.cache"] + cache_service = self.env["spp.metric.dimension.cache"] for record in self: cache_service.clear_dimension_cache(record.id) return result def unlink(self): """Clear cache when dimension is deleted.""" - cache_service = self.env["spp.metrics.dimension.cache"] + cache_service = self.env["spp.metric.dimension.cache"] for record in self: cache_service.clear_dimension_cache(record.id) return super().unlink() diff --git a/spp_metrics_services/models/dimension_cache.py b/spp_metric_service/models/dimension_cache.py similarity index 99% rename from spp_metrics_services/models/dimension_cache.py rename to spp_metric_service/models/dimension_cache.py index 8460b41c..80aebb3b 100644 --- a/spp_metrics_services/models/dimension_cache.py +++ b/spp_metric_service/models/dimension_cache.py @@ -20,7 +20,7 @@ class DimensionCacheService(models.AbstractModel): Invalidation: On dimension write/unlink """ - 
_name = "spp.metrics.dimension.cache" + _name = "spp.metric.dimension.cache" _description = "Dimension Evaluation Cache" @api.model diff --git a/spp_metrics_services/models/distribution_service.py b/spp_metric_service/models/distribution_service.py similarity index 99% rename from spp_metrics_services/models/distribution_service.py rename to spp_metric_service/models/distribution_service.py index 37b58939..0dfcb165 100644 --- a/spp_metrics_services/models/distribution_service.py +++ b/spp_metric_service/models/distribution_service.py @@ -15,7 +15,7 @@ class DistributionService(models.AbstractModel): inequality metrics from a list of numerical values. """ - _name = "spp.metrics.distribution" + _name = "spp.metric.distribution" _description = "Distribution Computation Service" @api.model diff --git a/spp_metrics_services/models/fairness_service.py b/spp_metric_service/models/fairness_service.py similarity index 99% rename from spp_metrics_services/models/fairness_service.py rename to spp_metric_service/models/fairness_service.py index 3c60b8e3..111eb350 100644 --- a/spp_metrics_services/models/fairness_service.py +++ b/spp_metric_service/models/fairness_service.py @@ -18,7 +18,7 @@ class FairnessService(models.AbstractModel): Uses configurable DemographicDimension records for analysis. 
""" - _name = "spp.metrics.fairness" + _name = "spp.metric.fairness" _description = "Fairness Analysis Service" @api.model @@ -443,7 +443,7 @@ def _analyze_expression_dimension( categories = {} # Check if cache service available for batch evaluation - cache_service = self.env.get("spp.metrics.dimension.cache") + cache_service = self.env.get("spp.metric.dimension.cache") if cache_service: # Use batch evaluation with caching evaluations = cache_service.evaluate_dimension_batch(dimension, population.ids) diff --git a/spp_metrics_services/models/privacy_service.py b/spp_metric_service/models/privacy_service.py similarity index 88% rename from spp_metrics_services/models/privacy_service.py rename to spp_metric_service/models/privacy_service.py index 341d5789..14160081 100644 --- a/spp_metrics_services/models/privacy_service.py +++ b/spp_metric_service/models/privacy_service.py @@ -22,7 +22,7 @@ class PrivacyEnforcerService(models.AbstractModel): Also handles access level enforcement (aggregate vs individual). """ - _name = "spp.metrics.privacy" + _name = "spp.metric.privacy" _description = "Privacy Enforcement Service" DEFAULT_K_THRESHOLD = 5 @@ -351,48 +351,3 @@ def suppress_value(self, value, count, k_threshold=None, stat_config=None): return formatted, True return value, False - - @api.model - def validate_access_level(self, user=None): - """ - Determine the access level for a user. 
- - :param user: res.users record (defaults to current user) - :returns: "aggregate" or "individual" - :rtype: str - """ - user = user or self.env.user - - # Check for access rule (use sudo for internal security check) - # Use defensive lookup - model may not be installed - access_rule_model = self.env.get("spp.aggregation.access.rule") - if access_rule_model is not None: - rule = access_rule_model.sudo().get_effective_rule_for_user(user) # nosemgrep: odoo-sudo-without-context - if rule: - return rule.access_level - - # Default to aggregate-only for safety - return "aggregate" - - @api.model - def get_k_threshold(self, user=None, context=None): - """ - Get the k-anonymity threshold for a user/context. - - :param user: res.users record (defaults to current user) - :param context: Optional context string (e.g., "api", "dashboard") - :returns: k threshold value - :rtype: int - """ - user = user or self.env.user - - # Check for access rule (use sudo for internal security check) - # Use defensive lookup - model may not be installed - access_rule_model = self.env.get("spp.aggregation.access.rule") - if access_rule_model is not None: - rule = access_rule_model.sudo().get_effective_rule_for_user(user) # nosemgrep: odoo-sudo-without-context - if rule: - return rule.minimum_k_anonymity - - # Default threshold - return self.DEFAULT_K_THRESHOLD diff --git a/spp_statistic_studio/pyproject.toml b/spp_metric_service/pyproject.toml similarity index 100% rename from spp_statistic_studio/pyproject.toml rename to spp_metric_service/pyproject.toml diff --git a/spp_metrics_services/readme/DESCRIPTION.md b/spp_metric_service/readme/DESCRIPTION.md similarity index 100% rename from spp_metrics_services/readme/DESCRIPTION.md rename to spp_metric_service/readme/DESCRIPTION.md diff --git a/spp_metrics_services/security/ir.model.access.csv b/spp_metric_service/security/ir.model.access.csv similarity index 100% rename from spp_metrics_services/security/ir.model.access.csv rename to 
spp_metric_service/security/ir.model.access.csv diff --git a/spp_metrics_services/static/description/icon.png b/spp_metric_service/static/description/icon.png similarity index 100% rename from spp_metrics_services/static/description/icon.png rename to spp_metric_service/static/description/icon.png diff --git a/spp_metric_service/static/description/index.html b/spp_metric_service/static/description/index.html new file mode 100644 index 00000000..a6fb995b --- /dev/null +++ b/spp_metric_service/static/description/index.html @@ -0,0 +1 @@ +

    spp_metric_service

    diff --git a/spp_metrics_services/tests/__init__.py b/spp_metric_service/tests/__init__.py similarity index 100% rename from spp_metrics_services/tests/__init__.py rename to spp_metric_service/tests/__init__.py diff --git a/spp_metrics_services/tests/test_coverage.py b/spp_metric_service/tests/test_coverage.py similarity index 68% rename from spp_metrics_services/tests/test_coverage.py rename to spp_metric_service/tests/test_coverage.py index 5c0ff133..16ed9861 100644 --- a/spp_metrics_services/tests/test_coverage.py +++ b/spp_metric_service/tests/test_coverage.py @@ -504,7 +504,7 @@ def test_write_clears_cache(self): "field_path": "name", } ) - cache_service = self.env["spp.metrics.dimension.cache"] + cache_service = self.env["spp.metric.dimension.cache"] with patch.object(type(cache_service), "clear_dimension_cache") as mock_clear: dim.write({"label": "Updated Label"}) mock_clear.assert_called() @@ -519,7 +519,7 @@ def test_unlink_clears_cache(self): "field_path": "name", } ) - cache_service = self.env["spp.metrics.dimension.cache"] + cache_service = self.env["spp.metric.dimension.cache"] with patch.object(type(cache_service), "clear_dimension_cache") as mock_clear: dim.unlink() mock_clear.assert_called() @@ -534,7 +534,7 @@ def setUpClass(cls): super().setUpClass() cls.partner_model = cls.env["res.partner"] - cls.fairness_service = cls.env["spp.metrics.fairness"] + cls.fairness_service = cls.env["spp.metric.fairness"] cls.dim_model = cls.env["spp.demographic.dimension"] # Create test registrants: mix of groups and individuals @@ -828,7 +828,7 @@ class TestPrivacyServiceExtended(TransactionCase): @classmethod def setUpClass(cls): super().setUpClass() - cls.privacy_service = cls.env["spp.metrics.privacy"] + cls.privacy_service = cls.env["spp.metric.privacy"] # ------------------------------------------------------------------------- # _find_cells_in_slice @@ -907,49 +907,6 @@ def test_find_dimension_siblings_different_dim_count(self): 
self.assertIn("male|rural", result) self.assertNotIn("male", result) - # ------------------------------------------------------------------------- - # validate_access_level - # ------------------------------------------------------------------------- - - def test_validate_access_level_without_access_rule_model(self): - """validate_access_level defaults to 'aggregate' when model unavailable.""" - with patch.object(type(self.env), "get", return_value=None): - result = self.privacy_service.validate_access_level() - self.assertEqual(result, "aggregate") - - def test_validate_access_level_default_user(self): - """validate_access_level uses current user by default.""" - # Without the access rule model installed, should default to aggregate - result = self.privacy_service.validate_access_level() - self.assertEqual(result, "aggregate") - - def test_validate_access_level_explicit_user(self): - """validate_access_level with explicit user parameter.""" - user = self.env.user - result = self.privacy_service.validate_access_level(user=user) - self.assertEqual(result, "aggregate") - - # ------------------------------------------------------------------------- - # get_k_threshold - # ------------------------------------------------------------------------- - - def test_get_k_threshold_without_access_rule_model(self): - """get_k_threshold defaults to DEFAULT_K_THRESHOLD when model unavailable.""" - with patch.object(type(self.env), "get", return_value=None): - result = self.privacy_service.get_k_threshold() - self.assertEqual(result, self.privacy_service.DEFAULT_K_THRESHOLD) - - def test_get_k_threshold_default(self): - """get_k_threshold returns default threshold.""" - result = self.privacy_service.get_k_threshold() - self.assertEqual(result, 5) - - def test_get_k_threshold_explicit_user(self): - """get_k_threshold with explicit user parameter.""" - user = self.env.user - result = self.privacy_service.get_k_threshold(user=user, context="api") - self.assertEqual(result, 5) - # 
------------------------------------------------------------------------- # _find_siblings # ------------------------------------------------------------------------- @@ -1103,7 +1060,7 @@ class TestDistributionServiceEdgeCases(TransactionCase): @classmethod def setUpClass(cls): super().setUpClass() - cls.dist_service = cls.env["spp.metrics.distribution"] + cls.dist_service = cls.env["spp.metric.distribution"] def test_empty_distribution(self): """_empty_distribution returns zeroed structure.""" @@ -1156,3 +1113,499 @@ def test_percentile_edge(self): """Percentile computation on small dataset.""" result = self.dist_service.compute_distribution([10, 20]) self.assertEqual(result["percentiles"]["p50"], 15.0) + + +@tagged("post_install", "-at_install") +class TestPrivacyServiceStripIds(TransactionCase): + """Test privacy service _strip_individual_ids and enforce access levels.""" + + @classmethod + def setUpClass(cls): + super().setUpClass() + cls.privacy_service = cls.env["spp.metric.privacy"] + + # ------------------------------------------------------------------------- + # _strip_individual_ids + # ------------------------------------------------------------------------- + + def test_strip_individual_ids_removes_registrant_ids(self): + """_strip_individual_ids removes 'registrant_ids' key from result.""" + result = {"total": 10, "registrant_ids": [1, 2, 3]} + stripped = self.privacy_service._strip_individual_ids(result) + self.assertNotIn("registrant_ids", stripped) + self.assertEqual(stripped["total"], 10) + + def test_strip_individual_ids_removes_partner_ids(self): + """_strip_individual_ids removes 'partner_ids' key from result.""" + result = {"total": 5, "partner_ids": [4, 5]} + stripped = self.privacy_service._strip_individual_ids(result) + self.assertNotIn("partner_ids", stripped) + + def test_strip_individual_ids_removes_ids(self): + """_strip_individual_ids removes 'ids' key from result.""" + result = {"total": 5, "ids": [6, 7, 8]} + stripped = 
self.privacy_service._strip_individual_ids(result) + self.assertNotIn("ids", stripped) + + def test_strip_individual_ids_no_ids_present(self): + """_strip_individual_ids on result without ID keys returns unchanged.""" + result = {"total": 10, "breakdown": {"male": {"count": 10}}} + stripped = self.privacy_service._strip_individual_ids(result) + self.assertIn("total", stripped) + self.assertIn("breakdown", stripped) + + def test_strip_individual_ids_does_not_modify_original(self): + """_strip_individual_ids does not mutate the input dict.""" + original = {"total": 10, "registrant_ids": [1, 2]} + original_copy = dict(original) + self.privacy_service._strip_individual_ids(original) + self.assertEqual(original, original_copy) + + def test_strip_individual_ids_removes_ids_from_breakdown_cells(self): + """_strip_individual_ids removes ID keys from nested breakdown cells.""" + result = { + "total": 20, + "breakdown": { + "male": {"count": 10, "registrant_ids": [1, 2], "partner_ids": [3]}, + "female": {"count": 10, "ids": [4, 5]}, + }, + } + stripped = self.privacy_service._strip_individual_ids(result) + male_cell = stripped["breakdown"]["male"] + female_cell = stripped["breakdown"]["female"] + self.assertNotIn("registrant_ids", male_cell) + self.assertNotIn("partner_ids", male_cell) + self.assertNotIn("ids", female_cell) + self.assertEqual(male_cell["count"], 10) + + def test_strip_individual_ids_skips_non_dict_breakdown_cells(self): + """_strip_individual_ids skips breakdown values that are not dicts.""" + result = { + "total": 10, + "breakdown": { + "male": "not a dict", + "female": {"count": 10, "registrant_ids": [1]}, + }, + } + # Should not raise an error + stripped = self.privacy_service._strip_individual_ids(result) + self.assertEqual(stripped["breakdown"]["male"], "not a dict") + self.assertNotIn("registrant_ids", stripped["breakdown"]["female"]) + + # ------------------------------------------------------------------------- + # enforce() with access_level + # 
------------------------------------------------------------------------- + + def test_enforce_individual_access_level_preserves_ids(self): + """enforce with access_level='individual' does not strip registrant_ids.""" + result = { + "total": 20, + "registrant_ids": [1, 2, 3], + "breakdown": { + "male": {"count": 10}, + "female": {"count": 10}, + }, + } + protected = self.privacy_service.enforce(result, access_level="individual") + self.assertIn("registrant_ids", protected) + self.assertEqual(protected["registrant_ids"], [1, 2, 3]) + + def test_enforce_aggregate_access_level_strips_ids(self): + """enforce with access_level='aggregate' removes registrant_ids.""" + result = { + "total": 20, + "registrant_ids": [1, 2, 3], + "breakdown": { + "male": {"count": 10}, + "female": {"count": 10}, + }, + } + protected = self.privacy_service.enforce(result, access_level="aggregate") + self.assertNotIn("registrant_ids", protected) + + def test_enforce_aggregate_applies_k_anonymity(self): + """enforce with access_level='aggregate' applies k-anonymity to breakdown.""" + result = { + "total": 20, + "breakdown": { + "male": {"count": 2}, # below threshold + "female": {"count": 18}, + }, + } + protected = self.privacy_service.enforce(result, k_threshold=5, access_level="aggregate") + self.assertTrue(protected["breakdown"]["male"].get("suppressed")) + + def test_enforce_custom_k_threshold(self): + """enforce with custom k_threshold suppresses at that threshold.""" + result = { + "total": 30, + "breakdown": { + "male": {"count": 8}, # below custom threshold of 10 + "female": {"count": 22}, + }, + } + protected = self.privacy_service.enforce(result, k_threshold=10, access_level="aggregate") + self.assertTrue(protected["breakdown"]["male"].get("suppressed")) + + def test_enforce_custom_k_threshold_not_suppressed(self): + """enforce with custom k_threshold does not suppress counts above it.""" + result = { + "total": 30, + "breakdown": { + "male": {"count": 15}, + "female": {"count": 15}, 
+ }, + } + protected = self.privacy_service.enforce(result, k_threshold=10, access_level="aggregate") + self.assertFalse(protected["breakdown"]["male"].get("suppressed", False)) + + def test_enforce_no_breakdown_key(self): + """enforce on result without 'breakdown' key works without error.""" + result = {"total": 100, "registrant_ids": [1, 2]} + protected = self.privacy_service.enforce(result, access_level="aggregate") + self.assertNotIn("breakdown", protected) + self.assertNotIn("registrant_ids", protected) + + def test_enforce_does_not_modify_original(self): + """enforce does not mutate the input result dict.""" + result = { + "total": 10, + "registrant_ids": [1], + "breakdown": {"male": {"count": 2}}, + } + original_ids = result["registrant_ids"][:] + self.privacy_service.enforce(result, access_level="aggregate") + # Original should be unchanged + self.assertIn("registrant_ids", result) + self.assertEqual(result["registrant_ids"], original_ids) + + +@tagged("post_install", "-at_install") +class TestDimensionCacheMakeKey(TransactionCase): + """Test dimension cache _make_registrant_key method.""" + + @classmethod + def setUpClass(cls): + super().setUpClass() + cls.cache_service = cls.env["spp.metric.dimension.cache"] + + def test_make_registrant_key_returns_string(self): + """_make_registrant_key returns a string.""" + key = self.cache_service._make_registrant_key([1, 2, 3]) + self.assertIsInstance(key, str) + + def test_make_registrant_key_consistent(self): + """_make_registrant_key returns the same hash for the same IDs.""" + key1 = self.cache_service._make_registrant_key([1, 2, 3]) + key2 = self.cache_service._make_registrant_key([1, 2, 3]) + self.assertEqual(key1, key2) + + def test_make_registrant_key_order_independent(self): + """_make_registrant_key returns the same hash regardless of input order.""" + key1 = self.cache_service._make_registrant_key([3, 1, 2]) + key2 = self.cache_service._make_registrant_key([1, 2, 3]) + self.assertEqual(key1, key2) + + def 
test_make_registrant_key_different_ids_differ(self): + """_make_registrant_key produces different hashes for different ID sets.""" + key1 = self.cache_service._make_registrant_key([1, 2, 3]) + key2 = self.cache_service._make_registrant_key([4, 5, 6]) + self.assertNotEqual(key1, key2) + + def test_make_registrant_key_accepts_frozenset(self): + """_make_registrant_key accepts a frozenset of IDs.""" + key_list = self.cache_service._make_registrant_key([1, 2, 3]) + key_frozenset = self.cache_service._make_registrant_key(frozenset([1, 2, 3])) + self.assertEqual(key_list, key_frozenset) + + def test_make_registrant_key_single_id(self): + """_make_registrant_key works with a single ID.""" + key = self.cache_service._make_registrant_key([42]) + self.assertIsInstance(key, str) + self.assertTrue(len(key) > 0) + + def test_make_registrant_key_empty_list(self): + """_make_registrant_key works with an empty list.""" + key = self.cache_service._make_registrant_key([]) + self.assertIsInstance(key, str) + + def test_make_registrant_key_is_md5_length(self): + """_make_registrant_key returns a 32-character MD5 hex string.""" + key = self.cache_service._make_registrant_key([1, 2, 3]) + self.assertEqual(len(key), 32) + + +@tagged("post_install", "-at_install") +class TestFairnessExpressionDimension(TransactionCase): + """Test fairness service _analyze_expression_dimension method.""" + + @classmethod + def setUpClass(cls): + super().setUpClass() + cls.partner_model = cls.env["res.partner"] + cls.fairness_service = cls.env["spp.metric.fairness"] + cls.dim_model = cls.env["spp.demographic.dimension"] + + # Create test registrants + cls.registrants = cls.partner_model.browse() + for i in range(6): + reg = cls.partner_model.create( + { + "name": f"Expr Fairness Registrant {i}", + "is_registrant": True, + "is_group": i % 2 == 0, + } + ) + cls.registrants |= reg + + cls.all_ids = cls.registrants.ids + + # Create expression-type dimension + cls.expr_dim = cls.dim_model.create( + { + "name": 
"fairness_expr_dim", + "label": "Expression Dimension", + "dimension_type": "expression", + "cel_expression": "record.is_group ? 'group' : 'individual'", + "applies_to": "all", + "default_value": "unknown", + } + ) + + def test_analyze_expression_dimension_no_cel_service(self): + """_analyze_expression_dimension returns None when CEL service unavailable.""" + beneficiary_set = set(self.all_ids[:3]) + base_domain = [("id", "in", self.all_ids)] + overall_coverage = len(beneficiary_set) / len(self.all_ids) + + # Patch env.get to return None for CEL service + with patch.object(type(self.env), "get", return_value=None): + result = self.fairness_service._analyze_expression_dimension( + self.expr_dim, + beneficiary_set, + base_domain, + overall_coverage, + self.partner_model, + ) + self.assertIsNone(result) + + def test_analyze_expression_dimension_fallback_without_cache(self): + """_analyze_expression_dimension uses per-record fallback when no cache service.""" + beneficiary_set = set(self.all_ids[:3]) + base_domain = [("id", "in", self.all_ids)] + overall_coverage = len(beneficiary_set) / len(self.all_ids) + + cel_service = self.env.get("spp.cel.service") + if not cel_service: + self.skipTest("CEL service not available") + + # Patch env.get to return cel_service for spp.cel.service but None for cache + def mock_get(key): + if key == "spp.cel.service": + return cel_service + return None + + with patch.object(type(self.env), "get", side_effect=mock_get): + result = self.fairness_service._analyze_expression_dimension( + self.expr_dim, + beneficiary_set, + base_domain, + overall_coverage, + self.partner_model, + ) + + # If CEL evaluates successfully, result should be a dict with groups + # If CEL expression fails, evaluate_for_record returns default + if result is not None: + self.assertIn("groups", result) + self.assertIn("attribute", result) + self.assertEqual(result["attribute"], "fairness_expr_dim") + + def test_analyze_expression_dimension_empty_categories(self): + 
"""_analyze_expression_dimension returns None when no categories found.""" + beneficiary_set = set(self.all_ids[:3]) + # Domain that matches nothing + base_domain = [("id", "=", -1)] + overall_coverage = 0.5 + + cel_service = self.env.get("spp.cel.service") + if not cel_service: + self.skipTest("CEL service not available") + + result = self.fairness_service._analyze_expression_dimension( + self.expr_dim, + beneficiary_set, + base_domain, + overall_coverage, + self.partner_model, + ) + # Empty population means no categories, so result is None + self.assertIsNone(result) + + def test_analyze_dimension_routes_to_expression(self): + """_analyze_dimension routes expression-type dimensions correctly.""" + beneficiary_set = set(self.all_ids[:3]) + base_domain = [("id", "in", self.all_ids)] + overall_coverage = len(beneficiary_set) / len(self.all_ids) + + # Patch _analyze_expression_dimension to verify it is called + with patch.object( + type(self.fairness_service), + "_analyze_expression_dimension", + return_value=None, + ) as mock_method: + self.fairness_service._analyze_dimension( + self.expr_dim, + beneficiary_set, + base_domain, + overall_coverage, + self.partner_model, + ) + mock_method.assert_called_once() + + +@tagged("post_install", "-at_install") +class TestBreakdownServiceComprehensive(TransactionCase): + """Comprehensive tests for breakdown service compute_breakdown.""" + + @classmethod + def setUpClass(cls): + super().setUpClass() + cls.partner_model = cls.env["res.partner"] + cls.breakdown_service = cls.env["spp.metric.breakdown"] + cls.dim_model = cls.env["spp.demographic.dimension"] + + # Create test registrants: mix of groups and individuals + cls.individuals = cls.partner_model.browse() + cls.groups = cls.partner_model.browse() + + for i in range(4): + ind = cls.partner_model.create( + { + "name": f"Breakdown Individual {i}", + "is_registrant": True, + "is_group": False, + } + ) + cls.individuals |= ind + + for i in range(3): + grp = 
cls.partner_model.create( + { + "name": f"Breakdown Group {i}", + "is_registrant": True, + "is_group": True, + } + ) + cls.groups |= grp + + cls.all_registrants = cls.individuals | cls.groups + cls.all_ids = cls.all_registrants.ids + + # Create two boolean dimensions for multi-dimensional breakdown + cls.is_group_dim = cls.dim_model.create( + { + "name": "breakdown_is_group", + "label": "Is Group", + "dimension_type": "field", + "field_path": "is_group", + "applies_to": "all", + } + ) + cls.is_registrant_dim = cls.dim_model.create( + { + "name": "breakdown_is_registrant", + "label": "Is Registrant", + "dimension_type": "field", + "field_path": "is_registrant", + "applies_to": "all", + } + ) + + def test_compute_breakdown_empty_registrant_ids(self): + """compute_breakdown with empty registrant_ids returns empty dict.""" + result = self.breakdown_service.compute_breakdown([], ["breakdown_is_group"]) + self.assertEqual(result, {}) + + def test_compute_breakdown_empty_group_by(self): + """compute_breakdown with empty group_by returns empty dict.""" + result = self.breakdown_service.compute_breakdown(self.all_ids, []) + self.assertEqual(result, {}) + + def test_compute_breakdown_nonexistent_dimension(self): + """compute_breakdown with only non-existent dimension names returns empty dict.""" + result = self.breakdown_service.compute_breakdown(self.all_ids, ["this_dimension_does_not_exist_xyz"]) + self.assertEqual(result, {}) + + def test_compute_breakdown_single_dimension(self): + """compute_breakdown with one dimension produces keyed breakdown.""" + result = self.breakdown_service.compute_breakdown(self.all_ids, ["breakdown_is_group"]) + self.assertIsInstance(result, dict) + self.assertTrue(len(result) > 0) + + # Keys should be single-part (no pipes) + for key in result: + self.assertNotIn("|", key) + + # Counts should sum to total + total = sum(cell["count"] for cell in result.values()) + self.assertEqual(total, len(self.all_ids)) + + def 
test_compute_breakdown_multi_dimensional(self): + """compute_breakdown with two dimensions produces pipe-separated keys.""" + result = self.breakdown_service.compute_breakdown( + self.all_ids, ["breakdown_is_group", "breakdown_is_registrant"] + ) + self.assertIsInstance(result, dict) + self.assertTrue(len(result) > 0) + + # All keys should be pipe-separated with exactly one pipe + for key in result: + parts = key.split("|") + self.assertEqual(len(parts), 2) + + # Counts should sum to total + total = sum(cell["count"] for cell in result.values()) + self.assertEqual(total, len(self.all_ids)) + + def test_compute_breakdown_cell_structure(self): + """compute_breakdown cells contain count, statistics, and labels.""" + result = self.breakdown_service.compute_breakdown(self.all_ids, ["breakdown_is_group"]) + for cell in result.values(): + self.assertIn("count", cell) + self.assertIn("statistics", cell) + self.assertIn("labels", cell) + self.assertIsInstance(cell["count"], int) + self.assertIsInstance(cell["statistics"], dict) + self.assertIsInstance(cell["labels"], dict) + + def test_compute_breakdown_labels_structure(self): + """compute_breakdown labels contain value and display for each dimension.""" + result = self.breakdown_service.compute_breakdown(self.all_ids, ["breakdown_is_group"]) + for cell in result.values(): + self.assertIn("breakdown_is_group", cell["labels"]) + dim_label = cell["labels"]["breakdown_is_group"] + self.assertIn("value", dim_label) + self.assertIn("display", dim_label) + + def test_compute_breakdown_multi_dim_labels_structure(self): + """compute_breakdown multi-dim cells contain labels for each dimension.""" + result = self.breakdown_service.compute_breakdown( + self.all_ids, ["breakdown_is_group", "breakdown_is_registrant"] + ) + for cell in result.values(): + self.assertIn("breakdown_is_group", cell["labels"]) + self.assertIn("breakdown_is_registrant", cell["labels"]) + + def test_compute_breakdown_partial_nonexistent_dimension(self): + 
"""compute_breakdown with mixed valid/nonexistent names uses only valid ones.""" + result = self.breakdown_service.compute_breakdown( + self.all_ids, + ["breakdown_is_group", "nonexistent_dimension_xyz"], + ) + # Only the valid dimension is used, so keys should be single-part + self.assertIsInstance(result, dict) + self.assertTrue(len(result) > 0) + for key in result: + # Only the valid dimension is used + self.assertNotIn("|", key) diff --git a/spp_metrics_services/tests/test_dimension_cache.py b/spp_metric_service/tests/test_dimension_cache.py similarity index 98% rename from spp_metrics_services/tests/test_dimension_cache.py rename to spp_metric_service/tests/test_dimension_cache.py index e9a9151b..ccfa17ee 100644 --- a/spp_metrics_services/tests/test_dimension_cache.py +++ b/spp_metric_service/tests/test_dimension_cache.py @@ -11,7 +11,7 @@ class TestDimensionCache(TransactionCase): @classmethod def setUpClass(cls): super().setUpClass() - cls.cache_service = cls.env["spp.metrics.dimension.cache"] + cls.cache_service = cls.env["spp.metric.dimension.cache"] cls.dimension_model = cls.env["spp.demographic.dimension"] cls.partner_model = cls.env["res.partner"] @@ -190,7 +190,7 @@ def test_cache_with_none_dimension(self): def test_breakdown_service_uses_cache(self): """Test that breakdown service benefits from caching.""" - breakdown_service = self.env["spp.metrics.breakdown"] + breakdown_service = self.env["spp.metric.breakdown"] # Clear cache first self.cache_service.clear_dimension_cache() diff --git a/spp_metrics_services/tests/test_services.py b/spp_metric_service/tests/test_services.py similarity index 92% rename from spp_metrics_services/tests/test_services.py rename to spp_metric_service/tests/test_services.py index c44fb924..6720f6b2 100644 --- a/spp_metrics_services/tests/test_services.py +++ b/spp_metric_service/tests/test_services.py @@ -39,27 +39,27 @@ def setUpClass(cls): def test_fairness_service_exists(self): """Test that fairness service is 
accessible.""" - fairness_service = self.env.get("spp.metrics.fairness") + fairness_service = self.env.get("spp.metric.fairness") self.assertIsNotNone(fairness_service, "Fairness service should be accessible") def test_distribution_service_exists(self): """Test that distribution service is accessible.""" - distribution_service = self.env.get("spp.metrics.distribution") + distribution_service = self.env.get("spp.metric.distribution") self.assertIsNotNone(distribution_service, "Distribution service should be accessible") def test_privacy_service_exists(self): """Test that privacy service is accessible.""" - privacy_service = self.env.get("spp.metrics.privacy") + privacy_service = self.env.get("spp.metric.privacy") self.assertIsNotNone(privacy_service, "Privacy service should be accessible") def test_breakdown_service_exists(self): """Test that breakdown service is accessible.""" - breakdown_service = self.env.get("spp.metrics.breakdown") + breakdown_service = self.env.get("spp.metric.breakdown") self.assertIsNotNone(breakdown_service, "Breakdown service should be accessible") def test_fairness_service_compute(self): """Test fairness computation works.""" - fairness_service = self.env["spp.metrics.fairness"] + fairness_service = self.env["spp.metric.fairness"] result = fairness_service.compute_fairness( self.registrant_ids, base_domain=[("is_registrant", "=", True)], @@ -72,7 +72,7 @@ def test_fairness_service_compute(self): def test_distribution_service_compute(self): """Test distribution computation works.""" - distribution_service = self.env["spp.metrics.distribution"] + distribution_service = self.env["spp.metric.distribution"] amounts = [100, 200, 300, 400, 500] result = distribution_service.compute_distribution(amounts) @@ -85,7 +85,7 @@ def test_distribution_service_compute(self): def test_privacy_service_enforce(self): """Test privacy enforcement works.""" - privacy_service = self.env["spp.metrics.privacy"] + privacy_service = self.env["spp.metric.privacy"] 
test_result = { "total_count": 10, "breakdown": { @@ -102,7 +102,7 @@ def test_privacy_service_enforce(self): def test_breakdown_service_compute(self): """Test breakdown computation works.""" - breakdown_service = self.env["spp.metrics.breakdown"] + breakdown_service = self.env["spp.metric.breakdown"] # Create a simple dimension (if spp.demographic.dimension exists) dimension_model = self.env.get("spp.demographic.dimension") @@ -113,9 +113,9 @@ def test_breakdown_service_compute(self): def test_empty_inputs(self): """Test services handle empty inputs gracefully.""" - fairness_service = self.env["spp.metrics.fairness"] - distribution_service = self.env["spp.metrics.distribution"] - breakdown_service = self.env["spp.metrics.breakdown"] + fairness_service = self.env["spp.metric.fairness"] + distribution_service = self.env["spp.metric.distribution"] + breakdown_service = self.env["spp.metric.breakdown"] # Test fairness with empty registrants fairness_result = fairness_service.compute_fairness([]) @@ -138,7 +138,7 @@ class TestPrivacySuppressionUnified(TransactionCase): @classmethod def setUpClass(cls): super().setUpClass() - cls.privacy_service = cls.env["spp.metrics.privacy"] + cls.privacy_service = cls.env["spp.metric.privacy"] def test_suppress_value_no_suppression_default_threshold(self): """Test suppress_value with count above default threshold (5).""" diff --git a/spp_metrics_services/views/demographic_dimension_views.xml b/spp_metric_service/views/demographic_dimension_views.xml similarity index 100% rename from spp_metrics_services/views/demographic_dimension_views.xml rename to spp_metric_service/views/demographic_dimension_views.xml diff --git a/spp_metrics_core/static/description/index.html b/spp_metrics_core/static/description/index.html deleted file mode 100644 index 8aff2b1c..00000000 --- a/spp_metrics_core/static/description/index.html +++ /dev/null @@ -1 +0,0 @@ -

    spp_metrics_core

    diff --git a/spp_metrics_services/static/description/index.html b/spp_metrics_services/static/description/index.html deleted file mode 100644 index 55ed91c0..00000000 --- a/spp_metrics_services/static/description/index.html +++ /dev/null @@ -1 +0,0 @@ -

    spp_metrics_services

    diff --git a/spp_mis_demo_v2/__manifest__.py b/spp_mis_demo_v2/__manifest__.py index 44b20aa1..f1e626e3 100644 --- a/spp_mis_demo_v2/__manifest__.py +++ b/spp_mis_demo_v2/__manifest__.py @@ -22,9 +22,9 @@ "spp_gis_report", # Registrant GPS coordinates for QGIS plugin demo "spp_registrant_gis", - # Statistics and aggregation for demo indicators - "spp_statistic", - "spp_aggregation", + # Indicators and analytics for demo indicators + "spp_indicator", + "spp_analytics", "spp_studio", # GIS API (used by QGIS plugin and PRISM frontend) "spp_api_v2_gis", diff --git a/spp_mis_demo_v2/data/demo_statistics.xml b/spp_mis_demo_v2/data/demo_statistics.xml index a59c977a..521eb6fa 100644 --- a/spp_mis_demo_v2/data/demo_statistics.xml +++ b/spp_mis_demo_v2/data/demo_statistics.xml @@ -4,7 +4,7 @@ This file creates: 1. CEL variables that define the computation logic - 2. Statistics (spp.statistic) that publish those variables to GIS/dashboards + 2. Statistics (spp.indicator) that publish those variables to GIS/dashboards The separation allows the same underlying variable to be published to multiple contexts with different presentation settings. 
@@ -137,14 +137,14 @@ ═══════════════════════════════════════════════════════════════════════ --> - + total_households Total Households Count of household groups in the selected area count households - + 10 @@ -152,105 +152,105 @@ - + total_members Total Members Total count of individual household members count people - + 15 - + children_under_5 Children Under 5 Count of children under 5 years old count children - + 20 - + children_under_18 Children Under 18 Count of children under 18 years old count children - + 30 - + elderly_60_plus Elderly (60+) Count of elderly persons aged 60 and above count people - + 40 - + female_members Female Members Count of female household members count people - + 45 - + male_members Male Members Count of male household members count people - + 46 - + disabled_members Disabled Members Count of household members with disabilities count people - + 50 - + enrolled_any_program Enrolled (Any Program) count households - + 60 diff --git a/spp_mis_demo_v2/tests/test_demo_statistics.py b/spp_mis_demo_v2/tests/test_demo_statistics.py index 5d428879..1bf60377 100644 --- a/spp_mis_demo_v2/tests/test_demo_statistics.py +++ b/spp_mis_demo_v2/tests/test_demo_statistics.py @@ -17,7 +17,7 @@ class TestDemoStatistics(TransactionCase): @classmethod def setUpClass(cls): super().setUpClass() - cls.stat_model = cls.env["spp.statistic"] + cls.stat_model = cls.env["spp.indicator"] # Required statistics that should be in the database cls.required_stats = [ diff --git a/spp_simulation/__manifest__.py b/spp_simulation/__manifest__.py index c86977f9..6ed88600 100644 --- a/spp_simulation/__manifest__.py +++ b/spp_simulation/__manifest__.py @@ -18,8 +18,8 @@ "spp_cel_domain", "spp_cel_widget", "spp_security", - "spp_aggregation", - "spp_metrics_core", + "spp_analytics", + "spp_metric", ], "data": [ # Security diff --git a/spp_simulation/services/simulation_service.py b/spp_simulation/services/simulation_service.py index 3d9b40b3..76455a51 100644 --- 
a/spp_simulation/services/simulation_service.py +++ b/spp_simulation/services/simulation_service.py @@ -13,7 +13,7 @@ class SimulationService(models.AbstractModel): """Orchestration service for running targeting simulations. This service orchestrates the simulation workflow but delegates heavy - computation to spp.aggregation.service for statistics, distribution, + computation to spp.analytics.service for statistics, distribution, and fairness analysis. """ @@ -65,15 +65,14 @@ def execute_simulation(self, scenario): amounts = self._apply_budget_strategy(scenario, amounts) # Step 4: Distribution stats - # Use spp.metrics.distribution for computation - distribution_service = self.env["spp.metrics.distribution"] + distribution_service = self.env["spp.metric.distribution"] distribution_data = distribution_service.compute_distribution(amounts) gini = distribution_data.get("gini_coefficient", 0.0) # Step 5: Fairness analysis - # Use spp.metrics.fairness for computation - fairness_service = self.env["spp.metrics.fairness"] - # Get base domain for population + fairness_service = self.env["spp.metric.fairness"] + # Derive base domain from target type so fairness compares against + # the correct population (groups vs individuals) profile = "registry_groups" if scenario.target_type == "group" else "registry_individuals" registry = self.env["spp.cel.registry"] cfg = registry.load_profile(profile) @@ -185,9 +184,9 @@ def _get_cel_profile(self, scenario): def _execute_targeting(self, scenario): """Execute the targeting expression and return all matching IDs. - NOTE: In Phase 6, this should use spp.aggregation.scope for unified - targeting. For now, it continues using CEL directly for backward - compatibility. + Uses CEL directly rather than spp.analytics.scope, since simulation + targeting has its own CEL expression lifecycle and does not need + scope-level caching or access control. 
""" # Load the CEL profile configuration profile = self._get_cel_profile(scenario) diff --git a/spp_simulation/tests/test_distribution_service.py b/spp_simulation/tests/test_distribution_service.py index 56f24d63..5e715045 100644 --- a/spp_simulation/tests/test_distribution_service.py +++ b/spp_simulation/tests/test_distribution_service.py @@ -9,12 +9,12 @@ class TestDistributionService(SimulationTestCommon): """Tests for distribution statistics computation. - NOTE: These tests now use spp.metrics.distribution directly. - The old spp.simulation.distribution.service has been removed (Phase 6 cleanup). + Tests use spp.metric.distribution directly to verify the underlying + computation service independently of the analytics routing layer. """ def _get_service(self): - return self.env["spp.metrics.distribution"] + return self.env["spp.metric.distribution"] def test_empty_distribution(self): """Test distribution with empty amounts list.""" diff --git a/spp_simulation/tests/test_fairness.py b/spp_simulation/tests/test_fairness.py index 05f4e232..a9e05b8b 100644 --- a/spp_simulation/tests/test_fairness.py +++ b/spp_simulation/tests/test_fairness.py @@ -26,10 +26,10 @@ def setUpClass(cls): def _get_service(self): """Get the fairness service. - NOTE: Updated to use spp.metrics.fairness directly (Phase 6 cleanup). - The old spp.simulation.fairness.service has been removed. + Uses spp.metric.fairness directly to test the underlying computation + service independently of the analytics routing layer. """ - return self.env["spp.metrics.fairness"] + return self.env["spp.metric.fairness"] def test_fairness_empty_beneficiaries(self): """Test fairness with no beneficiaries.""" @@ -76,9 +76,8 @@ def test_disparity_ratio_computation(self): def test_demographic_groups_defined(self): """Test that demographic dimensions are configured. - NOTE: This test was updated for Phase 4a migration to spp_aggregation. - The old _get_demographic_groups() method is deprecated. 
Demographic - groups are now configured through spp.demographic.dimension records. + Demographic groups are configured through spp.demographic.dimension + records rather than hard-coded in the simulation module. """ # Check that demographic dimensions exist dimension_model = self.env["spp.demographic.dimension"] diff --git a/spp_statistic/models/__init__.py b/spp_statistic/models/__init__.py deleted file mode 100644 index 1e589a56..00000000 --- a/spp_statistic/models/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -# Part of OpenSPP. See LICENSE file for full copyright and licensing details. -# statistic_category moved to spp_metrics_core as metric_category -from . import statistic -from . import statistic_context diff --git a/spp_statistic/security/ir.model.access.csv b/spp_statistic/security/ir.model.access.csv deleted file mode 100644 index f1631d6c..00000000 --- a/spp_statistic/security/ir.model.access.csv +++ /dev/null @@ -1,5 +0,0 @@ -id,name,model_id:id,group_id:id,perm_read,perm_write,perm_create,perm_unlink -access_spp_statistic_user,spp.statistic.user,model_spp_statistic,base.group_user,1,0,0,0 -access_spp_statistic_admin,spp.statistic.admin,model_spp_statistic,spp_security.group_spp_admin,1,1,1,1 -access_spp_statistic_context_user,spp.statistic.context.user,model_spp_statistic_context,base.group_user,1,0,0,0 -access_spp_statistic_context_admin,spp.statistic.context.admin,model_spp_statistic_context,spp_security.group_spp_admin,1,1,1,1 diff --git a/spp_statistic/static/description/index.html b/spp_statistic/static/description/index.html deleted file mode 100644 index fc385fa3..00000000 --- a/spp_statistic/static/description/index.html +++ /dev/null @@ -1 +0,0 @@ -

    spp_statistic

    diff --git a/spp_statistic_studio/security/ir.model.access.csv b/spp_statistic_studio/security/ir.model.access.csv deleted file mode 100644 index 61bb300a..00000000 --- a/spp_statistic_studio/security/ir.model.access.csv +++ /dev/null @@ -1,4 +0,0 @@ -id,name,model_id:id,group_id:id,perm_read,perm_write,perm_create,perm_unlink -access_spp_statistic_studio_admin,spp.statistic studio admin,spp_statistic.model_spp_statistic,spp_studio.group_studio_manager,1,1,1,1 -access_spp_metric_category_studio_admin,spp.metric.category studio admin,spp_metrics_core.model_spp_metric_category,spp_studio.group_studio_manager,1,1,1,1 -access_spp_statistic_context_studio_admin,spp.statistic.context studio admin,spp_statistic.model_spp_statistic_context,spp_studio.group_studio_manager,1,1,1,1 diff --git a/spp_statistic_studio/static/description/index.html b/spp_statistic_studio/static/description/index.html deleted file mode 100644 index f6a460cf..00000000 --- a/spp_statistic_studio/static/description/index.html +++ /dev/null @@ -1 +0,0 @@ -

    spp_statistic_studio