diff --git a/src/sentry/release_health/base.py b/src/sentry/release_health/base.py index 448e8cdacf610d..07bec0034ad5c3 100644 --- a/src/sentry/release_health/base.py +++ b/src/sentry/release_health/base.py @@ -36,6 +36,8 @@ "crash_free_rate(user)", "anr_rate()", "foreground_anr_rate()", + "unhandled_rate(session)", + "unhandled_rate(user)", ] GroupByFieldName = Literal[ @@ -182,6 +184,9 @@ class ReleaseHealthOverview(TypedDict, total=False): duration_p50: float | None duration_p90: float | None stats: Mapping[StatsPeriod, ReleaseHealthStats] + sessions_unhandled: int + unhandled_session_rate: float | None + unhandled_user_rate: float | None class CrashFreeBreakdown(TypedDict): @@ -202,6 +207,7 @@ class UserCounts(TypedDict): users_healthy: int users_crashed: int users_abnormal: int + users_unhandled: int users_errored: int @@ -214,6 +220,7 @@ class SessionCounts(TypedDict): sessions_healthy: int sessions_crashed: int sessions_abnormal: int + sessions_unhandled: int sessions_errored: int diff --git a/src/sentry/release_health/metrics.py b/src/sentry/release_health/metrics.py index 0bcd5a424a5ce7..b434e5517973fa 100644 --- a/src/sentry/release_health/metrics.py +++ b/src/sentry/release_health/metrics.py @@ -720,7 +720,7 @@ def _get_errored_sessions_for_overview( end: datetime, ) -> Mapping[tuple[int, str], int]: """ - Count of errored sessions, incl fatal (abnormal, crashed) sessions, + Count of errored sessions, incl fatal (abnormal, unhandled, crashed) session excl errored *preaggregated* sessions """ project_ids = [p.id for p in projects] @@ -774,12 +774,13 @@ def _get_session_by_status_for_overview( end: datetime, ) -> Mapping[tuple[int, str, str], int]: """ - Counts of init, abnormal and crashed sessions, purpose-built for overview + Counts of init, abnormal, unhandled and crashed sessions, purpose-built for overview """ project_ids = [p.id for p in projects] select = [ MetricField(metric_mri=SessionMRI.ABNORMAL.value, alias="abnormal", op=None), + MetricField(metric_mri=SessionMRI.UNHANDLED.value, alias="unhandled", op=None), MetricField(metric_mri=SessionMRI.CRASHED.value, alias="crashed", op=None), MetricField(metric_mri=SessionMRI.ALL.value, alias="init", op=None), MetricField( @@ -820,7 +821,13 @@ def _get_session_by_status_for_overview( release = by.get("release") totals = group.get("totals", {}) - for status in ["abnormal", "crashed", "init", "errored_preaggr"]: + for status in [ + "abnormal", + "unhandled", + "crashed", + "init", + "errored_preaggr", + ]: value = totals.get(status) if value is not None and value != 0.0: ret_val[(proj_id, release, status)] = value @@ -842,6 +849,9 @@ def _get_users_and_crashed_users_for_overview( select = [ MetricField(metric_mri=SessionMRI.ALL_USER.value, alias="all_users", op=None), MetricField(metric_mri=SessionMRI.CRASHED_USER.value, alias="crashed_users", op=None), + MetricField( + metric_mri=SessionMRI.UNHANDLED_USER.value, alias="unhandled_users", op=None + ), ] groupby = [ @@ -1037,8 +1047,10 @@ def get_release_health_data_overview( if not has_health_data and summary_stats_period != "90d": fetch_has_health_data_releases.add((project_id, release)) + sessions_unhandled = rv_sessions.get((project_id, release, "unhandled"), 0) sessions_crashed = rv_sessions.get((project_id, release, "crashed"), 0) + users_unhandled = rv_users.get((project_id, release, "unhandled_users"), 0) users_crashed = rv_users.get((project_id, release, "crashed_users"), 0) rv_row = rv[project_id, release] = { @@ -1050,19 +1062,36 @@ def get_release_health_data_overview( 
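# Worked example (hypothetical counts, for illustration only): suppose a release
# has total_sessions=20, sessions_crashed=2, sessions_unhandled=3, abnormal=1,
# and an errored total (rv_errored_sessions + errored_preaggr) of 8. The fields
# assembled in the dict below then come out as:
#   unhandled_session_rate = (3 + 2) / 20 * 100    = 25.0  # unhandled, incl. crashes
#   crash_free_sessions    = 100 - 2 / 20 * 100    = 90.0  # only crashes excluded
#   sessions_errored       = max(0, 8 - 2 - 3 - 1) = 2     # handled errors remain
# The user-based fields follow the same pattern with users_unhandled/users_crashed.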
"total_project_sessions_24h": adoption_info.get("project_sessions_24h"), "total_sessions": total_sessions, "total_users": total_users, - "has_health_data": has_health_data, - "sessions_crashed": sessions_crashed, + # Users where the error was `unhandled`; possibly resulting in a crash + "unhandled_user_rate": ( + (users_unhandled + users_crashed) / total_users * 100 if total_users else None + ), + # Users where the error was not a crash (but may have been unhandled) "crash_free_users": ( 100 - users_crashed / total_users * 100 if total_users else None ), + "has_health_data": has_health_data, + # Sessions where the error was specifically `unhandled`; NOT resulting in a crash + "sessions_unhandled": sessions_unhandled, + # Sessions where the error was a crash + "sessions_crashed": sessions_crashed, + # Sessions where the error was `unhandled`; possibly resulting in a crash + "unhandled_session_rate": ( + (sessions_unhandled + sessions_crashed) / total_sessions * 100 + if total_sessions + else None + ), + # Sessions where the error was not a crash (but may have been unhandled) "crash_free_sessions": ( 100 - sessions_crashed / float(total_sessions) * 100 if total_sessions else None ), + # Sessions where the error was handled "sessions_errored": max( 0, rv_errored_sessions.get((project_id, release), 0) + rv_sessions.get((project_id, release, "errored_preaggr"), 0) - sessions_crashed + - sessions_unhandled - rv_sessions.get((project_id, release, "abnormal"), 0), ), "duration_p50": None, @@ -1427,6 +1456,9 @@ def get_project_release_stats( MetricField( metric_mri=SessionMRI.CRASHED_USER.value, alias="users_crashed", op=None ), + MetricField( + metric_mri=SessionMRI.UNHANDLED_USER.value, alias="users_unhandled", op=None + ), MetricField( metric_mri=SessionMRI.ERRORED_USER.value, alias="users_errored", op=None ), @@ -1440,6 +1472,11 @@ def get_project_release_stats( MetricField( metric_mri=SessionMRI.ABNORMAL.value, alias="sessions_abnormal", op=None ), + MetricField( + metric_mri=SessionMRI.UNHANDLED.value, + alias="sessions_unhandled", + op=None, + ), MetricField(metric_mri=SessionMRI.CRASHED.value, alias="sessions_crashed", op=None), MetricField(metric_mri=SessionMRI.ERRORED.value, alias="sessions_errored", op=None), MetricField(metric_mri=SessionMRI.HEALTHY.value, alias="sessions_healthy", op=None), @@ -1500,6 +1537,7 @@ def get_project_release_stats( f"{stat}": 0, f"{stat}_abnormal": 0, f"{stat}_crashed": 0, + f"{stat}_unhandled": 0, f"{stat}_errored": 0, f"{stat}_healthy": 0, } diff --git a/src/sentry/release_health/metrics_sessions_v2.py b/src/sentry/release_health/metrics_sessions_v2.py index 5c47102fdad97c..869a7e66cd48e7 100644 --- a/src/sentry/release_health/metrics_sessions_v2.py +++ b/src/sentry/release_health/metrics_sessions_v2.py @@ -1,7 +1,7 @@ -""" This module offers the same functionality as sessions_v2, but pulls its data +"""This module offers the same functionality as sessions_v2, but pulls its data from the `metrics` dataset instead of `sessions`. -Do not call this module directly. Use the `release_health` service instead. """ +Do not call this module directly. 
Use the `release_health` service instead.""" import logging from abc import ABC, abstractmethod @@ -73,6 +73,7 @@ class SessionStatus(Enum): CRASHED = "crashed" ERRORED = "errored" HEALTHY = "healthy" + UNHANDLED = "unhandled" ALL_STATUSES = frozenset(iter(SessionStatus)) @@ -242,6 +243,7 @@ def _get_metric_fields( self.status_to_metric_field[SessionStatus.ABNORMAL], self.status_to_metric_field[SessionStatus.CRASHED], self.status_to_metric_field[SessionStatus.ERRORED], + self.status_to_metric_field[SessionStatus.UNHANDLED], ] return [self.get_all_field()] @@ -265,6 +267,7 @@ class SumSessionField(CountField): SessionStatus.ABNORMAL: MetricField(None, SessionMRI.ABNORMAL.value), SessionStatus.CRASHED: MetricField(None, SessionMRI.CRASHED.value), SessionStatus.ERRORED: MetricField(None, SessionMRI.ERRORED.value), + SessionStatus.UNHANDLED: MetricField(None, SessionMRI.UNHANDLED.value), None: MetricField(None, SessionMRI.ALL.value), } @@ -298,6 +301,7 @@ def __init__( SessionStatus.ABNORMAL: MetricField(None, SessionMRI.ABNORMAL_USER.value), SessionStatus.CRASHED: MetricField(None, SessionMRI.CRASHED_USER.value), SessionStatus.ERRORED: MetricField(None, SessionMRI.ERRORED_USER.value), + SessionStatus.UNHANDLED: MetricField(None, SessionMRI.UNHANDLED_USER.value), None: MetricField(None, SessionMRI.ALL_USER.value), } @@ -341,6 +345,8 @@ class SimpleForwardingField(Field): "crash_free_rate(user)": SessionMRI.CRASH_FREE_USER_RATE, "anr_rate()": SessionMRI.ANR_RATE, "foreground_anr_rate()": SessionMRI.FOREGROUND_ANR_RATE, + "unhandled_rate(session)": SessionMRI.UNHANDLED_RATE, + "unhandled_rate(user)": SessionMRI.UNHANDLED_USER_RATE, } def __init__(self, name: str, raw_groupby: Sequence[str], status_filter: StatusFilter): @@ -379,6 +385,8 @@ def _get_metric_fields( "crash_free_rate(user)": SimpleForwardingField, "anr_rate()": SimpleForwardingField, "foreground_anr_rate()": SimpleForwardingField, + "unhandled_rate(session)": SimpleForwardingField, + "unhandled_rate(user)": SimpleForwardingField, } PREFLIGHT_QUERY_COLUMNS = {"release.timestamp"} VirtualOrderByName = Literal["release.timestamp"] diff --git a/src/sentry/snuba/metrics/fields/base.py b/src/sentry/snuba/metrics/fields/base.py index c3d0cec8904544..4532853690ed31 100644 --- a/src/sentry/snuba/metrics/fields/base.py +++ b/src/sentry/snuba/metrics/fields/base.py @@ -62,6 +62,8 @@ sum_if_column_snql, team_key_transaction_snql, tolerated_count_transaction, + unhandled_sessions, + unhandled_users, uniq_aggregation_on_metric, uniq_if_column_snql, ) @@ -1375,6 +1377,22 @@ def generate_where_statements( org_id, metric_ids, alias=alias ), ), + SingularEntityDerivedMetric( + metric_mri=SessionMRI.UNHANDLED.value, + metrics=[SessionMRI.RAW_SESSION.value], + unit="sessions", + snql=lambda project_ids, org_id, metric_ids, alias=None: unhandled_sessions( + org_id, metric_ids, alias=alias + ), + ), + SingularEntityDerivedMetric( + metric_mri=SessionMRI.UNHANDLED_USER.value, + metrics=[SessionMRI.RAW_USER.value], + unit="users", + snql=lambda project_ids, org_id, metric_ids, alias=None: unhandled_users( + org_id, metric_ids, alias=alias + ), + ), SingularEntityDerivedMetric( metric_mri=SessionMRI.CRASHED.value, metrics=[SessionMRI.RAW_SESSION.value], @@ -1926,7 +1944,7 @@ def metric_object_factory(op: MetricOperationType | None, metric_mri: str) -> Me def generate_bottom_up_dependency_tree_for_metrics( - metrics_query_fields_set: set[tuple[MetricOperationType | None, str, str]] + metrics_query_fields_set: set[tuple[MetricOperationType | None, str, str]], 
 ) -> list[tuple[MetricOperationType | None, str, str]]:
     """
     This function basically generates a dependency list for all instances of
diff --git a/src/sentry/snuba/metrics/fields/snql.py b/src/sentry/snuba/metrics/fields/snql.py
index 85925024144292..2d307e1bb499ca 100644
--- a/src/sentry/snuba/metrics/fields/snql.py
+++ b/src/sentry/snuba/metrics/fields/snql.py
@@ -246,6 +246,20 @@ def all_users(org_id: int, metric_ids: Sequence[int], alias: str | None = None)
     return uniq_aggregation_on_metric(metric_ids, alias)
 
 
+def unhandled_sessions(
+    org_id: int, metric_ids: Sequence[int], alias: str | None = None
+) -> Function:
+    return _counter_sum_aggregation_on_session_status_factory(
+        org_id, session_status="unhandled", metric_ids=metric_ids, alias=alias
+    )
+
+
+def unhandled_users(org_id: int, metric_ids: Sequence[int], alias: str | None = None) -> Function:
+    return _set_uniq_aggregation_on_session_status_factory(
+        org_id, session_status="unhandled", metric_ids=metric_ids, alias=alias
+    )
+
+
 def crashed_sessions(org_id: int, metric_ids: Sequence[int], alias: str | None = None) -> Function:
     return _counter_sum_aggregation_on_session_status_factory(
         org_id, session_status="crashed", metric_ids=metric_ids, alias=alias
diff --git a/src/sentry/snuba/metrics/naming_layer/mri.py b/src/sentry/snuba/metrics/naming_layer/mri.py
index 33ae019c23f1f8..b19ef312ef3f95 100644
--- a/src/sentry/snuba/metrics/naming_layer/mri.py
+++ b/src/sentry/snuba/metrics/naming_layer/mri.py
@@ -72,20 +72,32 @@ class SessionMRI(Enum):
     ERRORED_PREAGGREGATED = "e:sessions/error.preaggr@none"
     ERRORED_SET = "e:sessions/error.unique@none"
     ERRORED_ALL = "e:sessions/all_errored@none"
+    HANDLED = "e:sessions/handled.unique@none"  # handled errors, excludes unhandled and crashed
+    UNHANDLED = "e:sessions/unhandled@none"  # unhandled, does not include crashed
     CRASHED_AND_ABNORMAL = "e:sessions/crashed_abnormal@none"
     CRASHED = "e:sessions/crashed@none"
     CRASH_FREE = "e:sessions/crash_free@none"
     ABNORMAL = "e:sessions/abnormal@none"
+    HANDLED_RATE = "e:sessions/handled_rate@ratio"  # handled errors, excludes unhandled and crashed
+    UNHANDLED_RATE = "e:sessions/unhandled_rate@ratio"  # unhandled, does not include crashed
     CRASH_RATE = "e:sessions/crash_rate@ratio"
-    CRASH_FREE_RATE = "e:sessions/crash_free_rate@ratio"
+    CRASH_FREE_RATE = "e:sessions/crash_free_rate@ratio"  # includes handled and unhandled
     ALL_USER = "e:sessions/user.all@none"
     HEALTHY_USER = "e:sessions/user.healthy@none"
     ERRORED_USER = "e:sessions/user.errored@none"
     ERRORED_USER_ALL = "e:sessions/user.all_errored@none"
+    HANDLED_USER = "e:sessions/user.handled@none"  # handled errors, excludes unhandled and crashed
+    UNHANDLED_USER = "e:sessions/user.unhandled@none"  # unhandled, does not include crashed
     CRASHED_AND_ABNORMAL_USER = "e:sessions/user.crashed_abnormal@none"
     CRASHED_USER = "e:sessions/user.crashed@none"
     CRASH_FREE_USER = "e:sessions/user.crash_free@none"
     ABNORMAL_USER = "e:sessions/user.abnormal@none"
+    HANDLED_USER_RATE = (
+        "e:sessions/user.handled_rate@ratio"  # handled errors, excludes unhandled and crashed
+    )
+    UNHANDLED_USER_RATE = (
+        "e:sessions/user.unhandled_rate@ratio"  # unhandled, does not include crashed
+    )
     CRASH_USER_RATE = "e:sessions/user.crash_rate@ratio"
     CRASH_FREE_USER_RATE = "e:sessions/user.crash_free_rate@ratio"
     ANR_USER = "e:sessions/user.anr@none"
diff --git a/src/sentry/snuba/metrics/naming_layer/public.py b/src/sentry/snuba/metrics/naming_layer/public.py
index 40d262b90fab0b..562599c96e092b 100644
--- a/src/sentry/snuba/metrics/naming_layer/public.py
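For reference, the new `unhandled_sessions`/`unhandled_users` helpers above reuse the existing sum/uniq-per-status factories, so the SnQL they emit should mirror the `crashed_sessions`/`crashed_users` shape with the status literal swapped. Below is a minimal sketch of that shape only; `status_tag_id` and `unhandled_value_id` are hypothetical placeholders for indexer-resolved IDs, and the real helpers additionally restrict on the resolved `metric_id` set.

from snuba_sdk import Column, Function

def sketch_unhandled_sessions(
    status_tag_id: int, unhandled_value_id: int, alias: str | None = None
) -> Function:
    # sumIf over the raw session counter, restricted to session.status == "unhandled"
    return Function(
        "sumIf",
        [
            Column("value"),
            Function("equals", [Column(f"tags[{status_tag_id}]"), unhandled_value_id]),
        ],
        alias=alias,
    )

def sketch_unhandled_users(
    status_tag_id: int, unhandled_value_id: int, alias: str | None = None
) -> Function:
    # uniqIf over the raw user set, with the same status condition
    return Function(
        "uniqIf",
        [
            Column("value"),
            Function("equals", [Column(f"tags[{status_tag_id}]"), unhandled_value_id]),
        ],
        alias=alias,
    )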
+++ b/src/sentry/snuba/metrics/naming_layer/public.py @@ -36,6 +36,7 @@ class SessionMetricKey(Enum): DURATION = "session.duration" ALL = "session.all" ABNORMAL = "session.abnormal" + UNHANDLED = "session.unhandled" CRASHED = "session.crashed" CRASH_FREE = "session.crash_free" ERRORED = "session.errored" @@ -46,6 +47,7 @@ class SessionMetricKey(Enum): ALL_USER = "session.all_user" ABNORMAL_USER = "session.abnormal_user" CRASHED_USER = "session.crashed_user" + UNHANDLED_USER = "session.unhandled_user" CRASH_FREE_USER = "session.crash_free_user" ERRORED_USER = "session.errored_user" HEALTHY_USER = "session.healthy_user" diff --git a/src/sentry/snuba/sessions_v2.py b/src/sentry/snuba/sessions_v2.py index 0eded1186366bd..0884428747fd03 100644 --- a/src/sentry/snuba/sessions_v2.py +++ b/src/sentry/snuba/sessions_v2.py @@ -106,7 +106,13 @@ def get_snuba_columns(self, raw_groupby) -> list[str]: ... class SessionsField: def get_snuba_columns(self, raw_groupby): if "session.status" in raw_groupby: - return ["sessions", "sessions_abnormal", "sessions_crashed", "sessions_errored"] + return [ + "sessions", + "sessions_abnormal", + "sessions_crashed", + "sessions_errored", + "sessions_unhandled", + ] return ["sessions"] def extract_from_row(self, row, group): @@ -116,15 +122,20 @@ def extract_from_row(self, row, group): if status is None: return row["sessions"] if status == "healthy": - healthy_sessions = row["sessions"] - row["sessions_errored"] + healthy_sessions = row["sessions"] - row["sessions_errored"] - row["sessions_unhandled"] return max(healthy_sessions, 0) if status == "abnormal": return row["sessions_abnormal"] + if status == "unhandled": + return row["sessions_unhandled"] if status == "crashed": return row["sessions_crashed"] if status == "errored": errored_sessions = ( - row["sessions_errored"] - row["sessions_crashed"] - row["sessions_abnormal"] + row["sessions_errored"] + - row["sessions_unhandled"] + - row["sessions_crashed"] + - row["sessions_abnormal"] ) return max(errored_sessions, 0) return 0 @@ -133,7 +144,7 @@ def extract_from_row(self, row, group): class UsersField: def get_snuba_columns(self, raw_groupby): if "session.status" in raw_groupby: - return ["users", "users_abnormal", "users_crashed", "users_errored"] + return ["users", "users_abnormal", "users_crashed", "users_errored", "users_unhandled"] return ["users"] def extract_from_row(self, row, group): @@ -143,14 +154,21 @@ def extract_from_row(self, row, group): if status is None: return row["users"] if status == "healthy": - healthy_users = row["users"] - row["users_errored"] + healthy_users = row["users"] - row["users_errored"] - row["users_unhandled"] return max(healthy_users, 0) if status == "abnormal": return row["users_abnormal"] + if status == "unhandled": + return row["users_unhandled"] if status == "crashed": return row["users_crashed"] if status == "errored": - errored_users = row["users_errored"] - row["users_crashed"] - row["users_abnormal"] + errored_users = ( + row["users_errored"] + - row["users_crashed"] + - row["users_abnormal"] + - row["users_unhandled"] + ) return max(errored_users, 0) return 0 @@ -232,7 +250,10 @@ def get_snuba_groupby(self): return [] def get_keys_for_row(self, row): - return [("session.status", key) for key in ["healthy", "abnormal", "crashed", "errored"]] + return [ + ("session.status", key) + for key in ["healthy", "abnormal", "crashed", "errored", "unhandled"] + ] # NOTE: in the future we might add new `user_agent` and `os` fields diff --git a/src/sentry/testutils/cases.py 
b/src/sentry/testutils/cases.py index 26087e7f41d513..7cf939cede775a 100644 --- a/src/sentry/testutils/cases.py +++ b/src/sentry/testutils/cases.py @@ -1454,7 +1454,7 @@ def push(mri: str, tags, value): elif not user_is_nil: push(SessionMRI.RAW_USER.value, {}, user) - if status in ("abnormal", "crashed"): # fatal + if status in ("abnormal", "unhandled", "crashed"): # fatal push(SessionMRI.RAW_SESSION.value, {"session.status": status}, +1) if not user_is_nil: push(SessionMRI.RAW_USER.value, {"session.status": status}, user) diff --git a/src/sentry/web/frontend/debug/debug_chart_renderer.py b/src/sentry/web/frontend/debug/debug_chart_renderer.py index 7f54e7d8071dec..8fe4746d07a471 100644 --- a/src/sentry/web/frontend/debug/debug_chart_renderer.py +++ b/src/sentry/web/frontend/debug/debug_chart_renderer.py @@ -663,6 +663,11 @@ "totals": {"sum(session)": 0}, "series": {"sum(session)": [0, 0, 0, 0, 0, 0, 0]}, }, + { + "by": {"session.status": "unhandled"}, + "totals": {"sum(session)": 0}, + "series": {"sum(session)": [0, 0, 0, 0, 0, 0, 0]}, + }, ], }, "rule": { diff --git a/tests/sentry/release_health/test_metrics_sessions_v2.py b/tests/sentry/release_health/test_metrics_sessions_v2.py index 1f12fd253cf56d..4386e858fc4385 100644 --- a/tests/sentry/release_health/test_metrics_sessions_v2.py +++ b/tests/sentry/release_health/test_metrics_sessions_v2.py @@ -41,7 +41,12 @@ Condition(Column("session.status"), Op.NEQ, "abnormal"), ], [Condition(Column("release"), Op.EQ, "foo")], - {SessionStatus.HEALTHY, SessionStatus.ERRORED, SessionStatus.CRASHED}, + { + SessionStatus.HEALTHY, + SessionStatus.ERRORED, + SessionStatus.CRASHED, + SessionStatus.UNHANDLED, + }, ), ( [ @@ -49,7 +54,12 @@ Condition(Column("session.status"), Op.NOT_IN, ["abnormal", "bogus"]), ], [Condition(Column("release"), Op.EQ, "foo")], - {SessionStatus.HEALTHY, SessionStatus.ERRORED, SessionStatus.CRASHED}, + { + SessionStatus.HEALTHY, + SessionStatus.ERRORED, + SessionStatus.CRASHED, + SessionStatus.UNHANDLED, + }, ), ( [ diff --git a/tests/sentry/snuba/metrics/fields/test_base.py b/tests/sentry/snuba/metrics/fields/test_base.py index e4223854dafd2d..614a6335009d3e 100644 --- a/tests/sentry/snuba/metrics/fields/test_base.py +++ b/tests/sentry/snuba/metrics/fields/test_base.py @@ -35,6 +35,8 @@ errored_all_users, errored_preaggr_sessions, subtraction, + unhandled_sessions, + unhandled_users, uniq_aggregation_on_metric, ) from sentry.snuba.metrics.naming_layer import ( @@ -130,6 +132,8 @@ def test_get_entity_and_validate_dependency_tree_of_a_single_entity_derived_metr SessionMRI.CRASHED_USER.value: "metrics_sets", SessionMRI.ABNORMAL.value: "metrics_counters", SessionMRI.ABNORMAL_USER.value: "metrics_sets", + SessionMRI.UNHANDLED.value: "metrics_counters", + SessionMRI.UNHANDLED_USER.value: "metrics_sets", SessionMRI.CRASH_FREE_RATE.value: "metrics_counters", SessionMRI.CRASH_FREE_USER_RATE.value: "metrics_sets", SessionMRI.ERRORED_PREAGGREGATED.value: "metrics_counters", @@ -163,7 +167,7 @@ def test_generate_select_snql_of_derived_metric(self) -> None: """ org_id = self.project.organization_id use_case_id = UseCaseID.SESSIONS - for status in ("init", "abnormal", "crashed", "errored"): + for status in ("init", "abnormal", "crashed", "errored", "unhandled"): rh_indexer_record(org_id, status) session_ids = [rh_indexer_record(org_id, SessionMRI.RAW_SESSION.value)] session_user_ids = [rh_indexer_record(org_id, SessionMRI.RAW_USER.value)] @@ -177,6 +181,8 @@ def test_generate_select_snql_of_derived_metric(self) -> None: 
SessionMRI.CRASHED_USER.value: (crashed_users, session_user_ids), SessionMRI.ABNORMAL_USER.value: (abnormal_users, session_user_ids), SessionMRI.ERRORED_USER_ALL.value: (errored_all_users, session_user_ids), + SessionMRI.UNHANDLED.value: (unhandled_sessions, session_ids), + SessionMRI.UNHANDLED_USER.value: (unhandled_users, session_user_ids), } for metric_mri, (func, metric_ids_list) in derived_name_snql.items(): assert DERIVED_METRICS[metric_mri].generate_select_statements( diff --git a/tests/sentry/snuba/metrics/test_naming_layer.py b/tests/sentry/snuba/metrics/test_naming_layer.py index 203be0b69ed9cb..f46d18a2261aad 100644 --- a/tests/sentry/snuba/metrics/test_naming_layer.py +++ b/tests/sentry/snuba/metrics/test_naming_layer.py @@ -13,6 +13,7 @@ "session.all", "session.abnormal", "session.crashed", + "session.unhandled", "session.crash_free_user_rate" "foo.bar.bar", "foo_bar.bar", ], @@ -32,6 +33,7 @@ def test_valid_public_name_regex(name): "..crashed", "e:sessions/error.preaggr@none", "e:sessions/crashed_abnormal@none", + "e:sessions/unhandled@none", "e:sessions/user.crashed_abnormal@none", "session.09_crashed", ], diff --git a/tests/sentry/snuba/metrics/test_snql.py b/tests/sentry/snuba/metrics/test_snql.py index 6c09d9c681033a..a24d0997829e3f 100644 --- a/tests/sentry/snuba/metrics/test_snql.py +++ b/tests/sentry/snuba/metrics/test_snql.py @@ -33,6 +33,8 @@ session_duration_filters, subtraction, tolerated_count_transaction, + unhandled_sessions, + unhandled_users, uniq_aggregation_on_metric, uniq_if_column_snql, ) @@ -70,6 +72,7 @@ def setUp(self) -> None: "errored", "exited", "init", + "unhandled", "session.status", } } @@ -98,6 +101,7 @@ def test_counter_sum_aggregation_on_session_status(self) -> None: ("crashed", crashed_sessions), ("errored_preaggr", errored_preaggr_sessions), ("abnormal", abnormal_sessions), + ("unhandled", unhandled_sessions), ]: assert func(self.org_id, self.metric_ids, alias=status) == Function( "sumIf", @@ -129,6 +133,7 @@ def test_set_uniq_aggregation_on_session_status(self) -> None: ("crashed", crashed_users), ("abnormal", abnormal_users), ("errored", errored_all_users), + ("unhandled", unhandled_users), ]: assert func(self.org_id, self.metric_ids, alias=status) == Function( "uniqIf", diff --git a/tests/snuba/api/endpoints/test_organization_sessions.py b/tests/snuba/api/endpoints/test_organization_sessions.py index 5ee25ef8794f10..649df31b815422 100644 --- a/tests/snuba/api/endpoints/test_organization_sessions.py +++ b/tests/snuba/api/endpoints/test_organization_sessions.py @@ -124,6 +124,8 @@ def setup_fixture(self): make_session( self.project3, distinct_id="39887d89-13b2-4c84-8c23-5d13d2102664", errors=1 ), + make_session(self.project3, status="unhandled"), + make_session(self.project3, status="unhandled"), make_session(self.project4), ] ) @@ -239,7 +241,7 @@ def test_timeseries_interval(self): "query": "", "intervals": [previous_start_of_day_snuba_format, start_of_day_snuba_format], "groups": [ - {"by": {}, "series": {"sum(session)": [0, 9]}, "totals": {"sum(session)": 9}} + {"by": {}, "series": {"sum(session)": [0, 11]}, "totals": {"sum(session)": 11}} ], } @@ -265,8 +267,8 @@ def test_timeseries_interval(self): "groups": [ { "by": {}, - "series": {"sum(session)": [0, 0, 1, 2, 6]}, - "totals": {"sum(session)": 9}, + "series": {"sum(session)": [0, 0, 1, 2, 8]}, + "totals": {"sum(session)": 11}, } ], } @@ -292,7 +294,7 @@ def test_user_all_accessible(self): "query": "", "intervals": [start_of_previous_day_snuba_format, start_of_day_snuba_format], 
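# Illustrative usage note (not asserted by this test): with the new literals wired
# up earlier in this diff, the endpoint exercised here should also accept the added
# derived fields, e.g.
#   ?field=unhandled_rate(session)&field=unhandled_rate(user)&statsPeriod=1d&interval=1d
# and grouping sum(session) / count_unique(user) by session.status now yields an
# "unhandled" group alongside abnormal, crashed, errored and healthy, as the
# expectations below reflect.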
"groups": [ - {"by": {}, "series": {"sum(session)": [0, 9]}, "totals": {"sum(session)": 9}} + {"by": {}, "series": {"sum(session)": [0, 11]}, "totals": {"sum(session)": 11}} ], } @@ -376,7 +378,7 @@ def test_filter_projects(self): assert response.status_code == 200, response.content assert result_sorted(response.data)["groups"] == [ - {"by": {}, "series": {"sum(session)": [0, 5]}, "totals": {"sum(session)": 5}} + {"by": {}, "series": {"sum(session)": [0, 7]}, "totals": {"sum(session)": 7}} ] @freeze_time(MOCK_DATETIME) @@ -410,8 +412,8 @@ def req(**kwargs): assert response.data["groups"] == [ { "by": {}, - "totals": {"anr_rate()": 0.0, "sum(session)": 9}, - "series": {"anr_rate()": [None, 0.0], "sum(session)": [0, 9]}, + "totals": {"anr_rate()": 0.0, "sum(session)": 11}, + "series": {"anr_rate()": [None, 0.0], "sum(session)": [0, 11]}, } ] @@ -539,7 +541,7 @@ def test_filter_unknown_release_in(self): "series": {"sum(session)": [0, 0]}, "totals": {"sum(session)": 0}, } - for status in ("abnormal", "crashed", "errored", "healthy") + for status in ("abnormal", "crashed", "errored", "healthy", "unhandled") ] @freeze_time(MOCK_DATETIME) @@ -568,8 +570,8 @@ def test_groupby_project(self): }, { "by": {"project": self.project3.id}, - "series": {"sum(session)": [0, 3]}, - "totals": {"sum(session)": 3}, + "series": {"sum(session)": [0, 5]}, + "totals": {"sum(session)": 5}, }, ] @@ -594,8 +596,8 @@ def test_groupby_environment(self): }, { "by": {"environment": "production"}, - "series": {"sum(session)": [0, 8]}, - "totals": {"sum(session)": 8}, + "series": {"sum(session)": [0, 10]}, + "totals": {"sum(session)": 10}, }, ] @@ -615,8 +617,8 @@ def test_groupby_release(self): assert result_sorted(response.data)["groups"] == [ { "by": {"release": "foo@1.0.0"}, - "series": {"sum(session)": [0, 7]}, - "totals": {"sum(session)": 7}, + "series": {"sum(session)": [0, 9]}, + "totals": {"sum(session)": 9}, }, { "by": {"release": "foo@1.1.0"}, @@ -656,14 +658,19 @@ def test_groupby_status(self): }, { "by": {"session.status": "errored"}, - "series": {"sum(session)": [0, 2]}, - "totals": {"sum(session)": 2}, + "series": {"sum(session)": [0, 4]}, + "totals": {"sum(session)": 4}, }, { "by": {"session.status": "healthy"}, "series": {"sum(session)": [0, 6]}, "totals": {"sum(session)": 6}, }, + { + "by": {"session.status": "unhandled"}, + "series": {"sum(session)": [0, 2]}, + "totals": {"sum(session)": 2}, + }, ] @freeze_time(MOCK_DATETIME) @@ -687,8 +694,8 @@ def test_groupby_cross(self): }, { "by": {"environment": "production", "release": "foo@1.0.0"}, - "series": {"sum(session)": [0, 6]}, - "totals": {"sum(session)": 6}, + "series": {"sum(session)": [0, 8]}, + "totals": {"sum(session)": 8}, }, { "by": {"environment": "production", "release": "foo@1.1.0"}, @@ -754,6 +761,11 @@ def test_users_groupby(self): "series": {"count_unique(user)": [0, 0]}, "totals": {"count_unique(user)": 0}, }, + { + "by": {"session.status": "unhandled"}, + "series": {"count_unique(user)": [0, 0]}, + "totals": {"count_unique(user)": 0}, + }, ] expected_duration_values = { @@ -778,7 +790,8 @@ def test_users_groupby_status_advanced(self): session2b = uuid4().hex user3 = uuid4().hex - session3 = uuid4().hex + session3a = uuid4().hex + session3b = uuid4().hex self.store_session( make_session(project, session_id=session1, distinct_id=user1, status="ok") @@ -804,7 +817,12 @@ def test_users_groupby_status_advanced(self): self.store_session( make_session( - project, session_id=session3, distinct_id=user3, errors=123, status="errored" + project, 
session_id=session3a, distinct_id=user3, errors=123, status="errored" + ) + ) + self.store_session( + make_session( + project, session_id=session3b, distinct_id=user3, errors=123, status="unhandled" ) ) @@ -864,6 +882,12 @@ def test_users_groupby_status_advanced(self): "series": {"count_unique(user)": [0, 3]}, "totals": {"count_unique(user)": 3}, }, + { + # user + "by": {"session.status": "unhandled"}, + "series": {"count_unique(user)": [0, 1]}, + "totals": {"count_unique(user)": 1}, + }, ] @freeze_time(MOCK_DATETIME) @@ -935,7 +959,7 @@ def test_duration_percentiles_groupby(self): assert group["totals"] == {key: None for key in expected}, group["by"] assert group["series"] == {key: [None, None] for key in expected} - assert seen == {"abnormal", "crashed", "errored", "healthy"} + assert seen == {"abnormal", "crashed", "errored", "healthy", "unhandled"} @freeze_time(MOCK_DATETIME) def test_snuba_limit_exceeded(self): @@ -972,8 +996,8 @@ def test_snuba_limit_exceeded(self): "environment": "production", "project": self.project3.id, }, - "totals": {"sum(session)": 2, "count_unique(user)": 1}, - "series": {"sum(session)": [0, 0, 0, 2], "count_unique(user)": [0, 0, 0, 1]}, + "totals": {"sum(session)": 4, "count_unique(user)": 1}, + "series": {"sum(session)": [0, 0, 0, 4], "count_unique(user)": [0, 0, 0, 1]}, }, ] @@ -1039,10 +1063,20 @@ def test_snuba_limit_exceeded_groupby_status(self): }, { "by": { - "session.status": "abnormal", + "project": self.project1.id, + "session.status": "unhandled", + "release": "foo@1.0.0", + "environment": "production", + }, + "totals": {"sum(session)": 0, "count_unique(user)": 0}, + "series": {"sum(session)": [0, 0, 0, 0], "count_unique(user)": [0, 0, 0, 0]}, + }, + { + "by": { "release": "foo@1.0.0", "project": self.project3.id, "environment": "production", + "session.status": "abnormal", }, "totals": {"sum(session)": 0, "count_unique(user)": 0}, "series": {"sum(session)": [0, 0, 0, 0], "count_unique(user)": [0, 0, 0, 0]}, @@ -1064,19 +1098,29 @@ def test_snuba_limit_exceeded_groupby_status(self): "environment": "production", "session.status": "errored", }, - "totals": {"sum(session)": 1, "count_unique(user)": 1}, - "series": {"sum(session)": [0, 0, 0, 1], "count_unique(user)": [0, 0, 0, 1]}, + "totals": {"sum(session)": 3, "count_unique(user)": 1}, + "series": {"sum(session)": [0, 0, 0, 3], "count_unique(user)": [0, 0, 0, 1]}, }, { "by": { - "session.status": "healthy", "release": "foo@1.0.0", "project": self.project3.id, "environment": "production", + "session.status": "healthy", }, "totals": {"sum(session)": 1, "count_unique(user)": 0}, "series": {"sum(session)": [0, 0, 0, 1], "count_unique(user)": [0, 0, 0, 0]}, }, + { + "by": { + "release": "foo@1.0.0", + "project": self.project3.id, + "environment": "production", + "session.status": "unhandled", + }, + "totals": {"sum(session)": 2, "count_unique(user)": 0}, + "series": {"sum(session)": [0, 0, 0, 2], "count_unique(user)": [0, 0, 0, 0]}, + }, ] @freeze_time(MOCK_DATETIME) @@ -1413,9 +1457,9 @@ def test_orderby(self): "release": "foo@1.0.0", "environment": "production", }, - "totals": {"sum(session)": 2, "p95(session.duration)": 79400.0}, + "totals": {"sum(session)": 4, "p95(session.duration)": 79400.0}, "series": { - "sum(session)": [0, 0, 2], + "sum(session)": [0, 0, 4], "p95(session.duration)": [None, None, 79400.0], }, }, @@ -1506,14 +1550,14 @@ def req(**kwargs): response = req(field=["sum(session)"], query="!session.status:healthy") assert response.status_code == 200, response.content assert 
result_sorted(response.data)["groups"] == [ - {"by": {}, "series": {"sum(session)": [0, 3]}, "totals": {"sum(session)": 3}} + {"by": {}, "series": {"sum(session)": [0, 7]}, "totals": {"sum(session)": 7}} ] # sum(session) filtered by multiple statuses adds them response = req(field=["sum(session)"], query="session.status:[healthy, errored]") assert response.status_code == 200, response.content assert result_sorted(response.data)["groups"] == [ - {"by": {}, "series": {"sum(session)": [0, 8]}, "totals": {"sum(session)": 8}} + {"by": {}, "series": {"sum(session)": [0, 10]}, "totals": {"sum(session)": 10}} ] response = req( @@ -1525,8 +1569,8 @@ def req(**kwargs): assert result_sorted(response.data)["groups"] == [ { "by": {"session.status": "errored"}, - "totals": {"sum(session)": 2}, - "series": {"sum(session)": [0, 2]}, + "totals": {"sum(session)": 4}, + "series": {"sum(session)": [0, 4]}, }, { "by": {"session.status": "healthy"}, @@ -1750,15 +1794,15 @@ def req(**kwargs): { "by": {"environment": "production", "release": "foo@1.0.0"}, "series": { - "crash_free_rate(session)": [None, 0.8333333333333334], + "crash_free_rate(session)": [None, 0.875], "crash_free_rate(user)": [None, 1.0], - "crash_rate(session)": [None, 0.16666666666666666], + "crash_rate(session)": [None, 0.125], "crash_rate(user)": [None, 0.0], }, "totals": { - "crash_free_rate(session)": 0.8333333333333334, + "crash_free_rate(session)": 0.875, "crash_free_rate(user)": 1.0, - "crash_rate(session)": 0.16666666666666666, + "crash_rate(session)": 0.125, "crash_rate(user)": 0.0, }, }, @@ -1996,6 +2040,9 @@ def test_order_by_with_session_status_groupby(self): self.store_session( make_session(rando_project, release=release_1b.version, status="crashed") ) + self.store_session( + make_session(rando_project, release=release_1b.version, status="unhandled") + ) for _ in range(10): self.store_session(make_session(rando_project, release=release_1b.version)) for _ in range(3): @@ -2006,6 +2053,9 @@ def test_order_by_with_session_status_groupby(self): self.store_session( make_session(rando_project, release=release_1a.version, status="crashed") ) + self.store_session( + make_session(rando_project, release=release_1a.version, status="unhandled") + ) self.store_session(make_session(rando_project, release=release_1a.version)) for _ in range(3): self.store_session(make_session(rando_project, errors=1, release=release_1a.version)) @@ -2033,14 +2083,19 @@ def test_order_by_with_session_status_groupby(self): }, { "by": {"release": "1B", "session.status": "errored"}, - "totals": {"sum(session)": 3}, - "series": {"sum(session)": [0, 3]}, + "totals": {"sum(session)": 4}, + "series": {"sum(session)": [0, 4]}, }, { "by": {"release": "1B", "session.status": "healthy"}, "totals": {"sum(session)": 10}, "series": {"sum(session)": [0, 10]}, }, + { + "by": {"release": "1B", "session.status": "unhandled"}, + "totals": {"sum(session)": 1}, + "series": {"sum(session)": [0, 1]}, + }, { "by": {"release": "1A", "session.status": "abnormal"}, "totals": {"sum(session)": 0}, @@ -2053,14 +2108,19 @@ def test_order_by_with_session_status_groupby(self): }, { "by": {"release": "1A", "session.status": "errored"}, - "totals": {"sum(session)": 3}, - "series": {"sum(session)": [0, 3]}, + "totals": {"sum(session)": 4}, + "series": {"sum(session)": [0, 4]}, }, { "by": {"release": "1A", "session.status": "healthy"}, "totals": {"sum(session)": 1}, "series": {"sum(session)": [0, 1]}, }, + { + "by": {"release": "1A", "session.status": "unhandled"}, + "totals": {"sum(session)": 1}, + 
"series": {"sum(session)": [0, 1]}, + }, ] @freeze_time(MOCK_DATETIME) diff --git a/tests/snuba/sessions/test_sessions.py b/tests/snuba/sessions/test_sessions.py index a1563486369088..148140dac435ab 100644 --- a/tests/snuba/sessions/test_sessions.py +++ b/tests/snuba/sessions/test_sessions.py @@ -29,9 +29,11 @@ def setUp(self): self.session_started = time.time() // 60 * 60 self.session_release = "foo@1.0.0" self.session_crashed_release = "foo@2.0.0" + self.session_unhandled_release = "foo@2.1.0" session_1 = "5d52fd05-fcc9-4bf3-9dc9-267783670341" session_2 = "5e910c1a-6941-460e-9843-24103fb6a63c" session_3 = "a148c0c5-06a2-423b-8901-6b43b812cf82" + session_4 = "dce6ff99-3fcc-4a61-9a79-7bd8d6b917e9" user_1 = "39887d89-13b2-4c84-8c23-5d13d2102666" self.store_session( @@ -80,6 +82,17 @@ def setUp(self): received=self.received, ) ) + self.store_session( + self.build_session( + distinct_id=user_1, + session_id=session_4, + status="unhandled", + release=self.session_unhandled_release, + environment="prod", + started=self.session_started, + received=self.received, + ) + ) def test_get_oldest_health_data_for_releases(self): data = self.backend.get_oldest_health_data_for_releases( @@ -453,7 +466,7 @@ def test_basic_release_model_adoptions(self): """ proj_id = self.project.id data = self.backend.get_changed_project_release_model_adoptions([proj_id]) - assert set(data) == {(proj_id, "foo@1.0.0"), (proj_id, "foo@2.0.0")} + assert set(data) == {(proj_id, "foo@1.0.0"), (proj_id, "foo@2.0.0"), (proj_id, "foo@2.1.0")} def test_old_release_model_adoptions(self): """ @@ -463,7 +476,7 @@ def test_old_release_model_adoptions(self): proj_id = self.project.id self.store_session( self.build_session( - release="foo@3.0.0", + release="foo@0.0.1-beta.1", environment="prod", status="crashed", started=self.session_started - _100h, @@ -472,7 +485,7 @@ def test_old_release_model_adoptions(self): ) data = self.backend.get_changed_project_release_model_adoptions([proj_id]) - assert set(data) == {(proj_id, "foo@1.0.0"), (proj_id, "foo@2.0.0")} + assert set(data) == {(proj_id, "foo@1.0.0"), (proj_id, "foo@2.0.0"), (proj_id, "foo@2.1.0")} def test_multi_proj_release_model_adoptions(self): """Test that the api works with multiple projects""" @@ -493,6 +506,7 @@ def test_multi_proj_release_model_adoptions(self): assert set(data) == { (proj_id, "foo@1.0.0"), (proj_id, "foo@2.0.0"), + (proj_id, "foo@2.1.0"), (new_proj_id, "foo@3.0.0"), } @@ -534,11 +548,12 @@ def test_get_project_release_stats_users(self): { "duration_p50": None, "duration_p90": None, - "users": 0, "users_abnormal": 0, "users_crashed": 0, "users_errored": 0, "users_healthy": 0, + "users_unhandled": 0, + "users": 0, }, { "duration_p50": None, @@ -548,6 +563,7 @@ def test_get_project_release_stats_users(self): "users_crashed": 0, "users_errored": 0, "users_healthy": 0, + "users_unhandled": 0, }, { "duration_p50": None, @@ -557,6 +573,7 @@ def test_get_project_release_stats_users(self): "users_crashed": 0, "users_errored": 0, "users_healthy": 0, + "users_unhandled": 0, }, { "duration_p50": 45.0, @@ -566,6 +583,7 @@ def test_get_project_release_stats_users(self): "users_crashed": 0, "users_errored": 0, "users_healthy": 1, + "users_unhandled": 0, }, ], { @@ -574,6 +592,63 @@ def test_get_project_release_stats_users(self): "users_crashed": 0, "users_errored": 0, "users_healthy": 1, + "users_unhandled": 0, + }, + ) + + def test_get_project_release_stats_users_unhandled(self): + self._test_get_project_release_stats( + "users", + self.session_unhandled_release, + [ + { + 
"duration_p50": None, + "duration_p90": None, + "users": 0, + "users_abnormal": 0, + "users_crashed": 0, + "users_errored": 0, + "users_healthy": 0, + "users_unhandled": 0, + }, + { + "duration_p50": None, + "duration_p90": None, + "users": 0, + "users_abnormal": 0, + "users_crashed": 0, + "users_errored": 0, + "users_healthy": 0, + "users_unhandled": 0, + }, + { + "duration_p50": None, + "duration_p90": None, + "users": 0, + "users_abnormal": 0, + "users_crashed": 0, + "users_errored": 0, + "users_healthy": 0, + "users_unhandled": 0, + }, + { + "duration_p50": None, + "duration_p90": None, + "users": 1, + "users_abnormal": 0, + "users_crashed": 0, + "users_errored": 0, + "users_healthy": 0, + "users_unhandled": 1, + }, + ], + { + "users": 1, + "users_abnormal": 0, + "users_crashed": 0, + "users_errored": 0, + "users_healthy": 0, + "users_unhandled": 1, }, ) @@ -590,6 +665,7 @@ def test_get_project_release_stats_users_crashed(self): "users_crashed": 0, "users_errored": 0, "users_healthy": 0, + "users_unhandled": 0, }, { "duration_p50": None, @@ -599,6 +675,7 @@ def test_get_project_release_stats_users_crashed(self): "users_crashed": 0, "users_errored": 0, "users_healthy": 0, + "users_unhandled": 0, }, { "duration_p50": None, @@ -608,6 +685,7 @@ def test_get_project_release_stats_users_crashed(self): "users_crashed": 0, "users_errored": 0, "users_healthy": 0, + "users_unhandled": 0, }, { "duration_p50": None, @@ -617,6 +695,7 @@ def test_get_project_release_stats_users_crashed(self): "users_crashed": 1, "users_errored": 0, "users_healthy": 0, + "users_unhandled": 0, }, ], { @@ -625,6 +704,7 @@ def test_get_project_release_stats_users_crashed(self): "users_crashed": 1, "users_errored": 0, "users_healthy": 0, + "users_unhandled": 0, }, ) @@ -641,6 +721,7 @@ def test_get_project_release_stats_sessions(self): "sessions_crashed": 0, "sessions_errored": 0, "sessions_healthy": 0, + "sessions_unhandled": 0, }, { "duration_p50": None, @@ -650,6 +731,7 @@ def test_get_project_release_stats_sessions(self): "sessions_crashed": 0, "sessions_errored": 0, "sessions_healthy": 0, + "sessions_unhandled": 0, }, { "duration_p50": None, @@ -659,6 +741,7 @@ def test_get_project_release_stats_sessions(self): "sessions_crashed": 0, "sessions_errored": 0, "sessions_healthy": 0, + "sessions_unhandled": 0, }, { "duration_p50": 45.0, @@ -668,6 +751,7 @@ def test_get_project_release_stats_sessions(self): "sessions_crashed": 0, "sessions_errored": 0, "sessions_healthy": 2, + "sessions_unhandled": 0, }, ], { @@ -676,6 +760,63 @@ def test_get_project_release_stats_sessions(self): "sessions_crashed": 0, "sessions_errored": 0, "sessions_healthy": 2, + "sessions_unhandled": 0, + }, + ) + + def test_get_project_release_stats_sessions_unhandled(self): + self._test_get_project_release_stats( + "sessions", + self.session_unhandled_release, + [ + { + "duration_p50": None, + "duration_p90": None, + "sessions": 0, + "sessions_abnormal": 0, + "sessions_crashed": 0, + "sessions_errored": 0, + "sessions_healthy": 0, + "sessions_unhandled": 0, + }, + { + "duration_p50": None, + "duration_p90": None, + "sessions": 0, + "sessions_abnormal": 0, + "sessions_crashed": 0, + "sessions_errored": 0, + "sessions_healthy": 0, + "sessions_unhandled": 0, + }, + { + "duration_p50": None, + "duration_p90": None, + "sessions": 0, + "sessions_abnormal": 0, + "sessions_crashed": 0, + "sessions_errored": 0, + "sessions_healthy": 0, + "sessions_unhandled": 0, + }, + { + "duration_p50": None, + "duration_p90": None, + "sessions": 1, + "sessions_abnormal": 
0, + "sessions_crashed": 0, + "sessions_errored": 0, + "sessions_healthy": 0, + "sessions_unhandled": 1.0, + }, + ], + { + "sessions": 1, + "sessions_abnormal": 0, + "sessions_crashed": 0, + "sessions_errored": 0, + "sessions_healthy": 0, + "sessions_unhandled": 1.0, }, ) @@ -692,6 +833,7 @@ def test_get_project_release_stats_sessions_crashed(self): "sessions_crashed": 0, "sessions_errored": 0, "sessions_healthy": 0, + "sessions_unhandled": 0, }, { "duration_p50": None, @@ -701,6 +843,7 @@ def test_get_project_release_stats_sessions_crashed(self): "sessions_crashed": 0, "sessions_errored": 0, "sessions_healthy": 0, + "sessions_unhandled": 0, }, { "duration_p50": None, @@ -710,6 +853,7 @@ def test_get_project_release_stats_sessions_crashed(self): "sessions_crashed": 0, "sessions_errored": 0, "sessions_healthy": 0, + "sessions_unhandled": 0, }, { "duration_p50": None, @@ -719,6 +863,7 @@ def test_get_project_release_stats_sessions_crashed(self): "sessions_crashed": 1, "sessions_errored": 0, "sessions_healthy": 0, + "sessions_unhandled": 0, }, ], { @@ -727,6 +872,7 @@ def test_get_project_release_stats_sessions_crashed(self): "sessions_crashed": 1, "sessions_errored": 0, "sessions_healthy": 0, + "sessions_unhandled": 0, }, ) @@ -747,6 +893,7 @@ def test_get_project_release_stats_no_sessions(self): "sessions_crashed": 0, "sessions_errored": 0, "sessions_healthy": 0, + "sessions_unhandled": 0, }, { "duration_p50": None, @@ -756,6 +903,7 @@ def test_get_project_release_stats_no_sessions(self): "sessions_crashed": 0, "sessions_errored": 0, "sessions_healthy": 0, + "sessions_unhandled": 0, }, { "duration_p50": None, @@ -765,6 +913,7 @@ def test_get_project_release_stats_no_sessions(self): "sessions_crashed": 0, "sessions_errored": 0, "sessions_healthy": 0, + "sessions_unhandled": 0, }, { "duration_p50": None, @@ -774,6 +923,7 @@ def test_get_project_release_stats_no_sessions(self): "sessions_crashed": 0, "sessions_errored": 0, "sessions_healthy": 0, + "sessions_unhandled": 0, }, ], { @@ -782,6 +932,7 @@ def test_get_project_release_stats_no_sessions(self): "sessions_crashed": 0, "sessions_errored": 0, "sessions_healthy": 0, + "sessions_unhandled": 0, }, ) @@ -798,6 +949,7 @@ def test_get_project_release_stats_no_users(self): "users_crashed": 0, "users_errored": 0, "users_healthy": 0, + "users_unhandled": 0, }, { "duration_p50": None, @@ -807,6 +959,7 @@ def test_get_project_release_stats_no_users(self): "users_crashed": 0, "users_errored": 0, "users_healthy": 0, + "users_unhandled": 0, }, { "duration_p50": None, @@ -816,6 +969,7 @@ def test_get_project_release_stats_no_users(self): "users_crashed": 0, "users_errored": 0, "users_healthy": 0, + "users_unhandled": 0, }, { "duration_p50": None, @@ -825,6 +979,7 @@ def test_get_project_release_stats_no_users(self): "users_crashed": 0, "users_errored": 0, "users_healthy": 0, + "users_unhandled": 0, }, ], { @@ -833,6 +988,7 @@ def test_get_project_release_stats_no_users(self): "users_crashed": 0, "users_errored": 0, "users_healthy": 0, + "users_unhandled": 0, }, ) @@ -1564,4 +1720,5 @@ def test_get_project_release_stats_users(self): "users_crashed": 1, "users_errored": 0, "users_healthy": 2, + "users_unhandled": 0, } diff --git a/tests/snuba/sessions/test_sessions_v2.py b/tests/snuba/sessions/test_sessions_v2.py index dd1dd5c58cd4db..453b95c135bda1 100644 --- a/tests/snuba/sessions/test_sessions_v2.py +++ b/tests/snuba/sessions/test_sessions_v2.py @@ -197,6 +197,7 @@ def test_virtual_groupby_query(): "sessions_abnormal", "sessions_crashed", 
"sessions_errored", + "sessions_unhandled", ] assert query.query_groupby == [] @@ -209,6 +210,7 @@ def test_virtual_groupby_query(): "users_abnormal", "users_crashed", "users_errored", + "users_unhandled", ] assert query.query_groupby == [] @@ -575,61 +577,71 @@ def test_massage_virtual_groupby_timeseries(): ) result_totals = [ { - "users": 1, - "users_crashed": 1, - "sessions": 31, - "sessions_errored": 15, - "users_errored": 1, "sessions_abnormal": 6, "sessions_crashed": 8, + "sessions_errored": 15, + "sessions_unhandled": 0, + "sessions": 31, "users_abnormal": 0, + "users_crashed": 1, + "users_errored": 1, + "users_unhandled": 4, + "users": 5, } ] # snuba returns the datetimes as strings for now result_timeseries = [ { - "sessions_errored": 1, - "users": 1, - "users_crashed": 1, + "bucketed_started": "2020-12-18T12:00:00+00:00", "sessions_abnormal": 0, + "sessions_crashed": 1, + "sessions_errored": 1, + "sessions_unhandled": 0, "sessions": 3, - "users_errored": 1, "users_abnormal": 0, - "sessions_crashed": 1, - "bucketed_started": "2020-12-18T12:00:00+00:00", + "users_crashed": 1, + "users_errored": 1, + "users_unhandled": 0, + "users": 1, }, { - "sessions_errored": 0, - "users": 1, - "users_crashed": 0, + "bucketed_started": "2020-12-18T06:00:00+00:00", "sessions_abnormal": 0, + "sessions_crashed": 0, + "sessions_errored": 0, + "sessions_unhandled": 0, "sessions": 3, - "users_errored": 0, "users_abnormal": 0, - "sessions_crashed": 0, - "bucketed_started": "2020-12-18T06:00:00+00:00", + "users_crashed": 0, + "users_errored": 0, + "users_unhandled": 1, + "users": 2, }, { - "sessions_errored": 10, - "users": 1, - "users_crashed": 0, + "bucketed_started": "2020-12-18T00:00:00+00:00", "sessions_abnormal": 2, + "sessions_crashed": 4, + "sessions_errored": 10, + "sessions_unhandled": 0, "sessions": 15, - "users_errored": 0, "users_abnormal": 0, - "sessions_crashed": 4, - "bucketed_started": "2020-12-18T00:00:00+00:00", + "users_crashed": 0, + "users_errored": 0, + "users_unhandled": 3, + "users": 4, }, { - "sessions_errored": 4, - "users": 1, - "users_crashed": 0, + "bucketed_started": "2020-12-17T18:00:00+00:00", "sessions_abnormal": 4, + "sessions_crashed": 3, + "sessions_errored": 4, + "sessions_unhandled": 0, "sessions": 10, - "users_errored": 0, "users_abnormal": 0, - "sessions_crashed": 3, - "bucketed_started": "2020-12-17T18:00:00+00:00", + "users_crashed": 0, + "users_errored": 0, + "users_unhandled": 0, + "users": 1, }, ] @@ -669,6 +681,11 @@ def test_massage_virtual_groupby_timeseries(): # so the `0` here is expected, as that's an example of the `count_unique` behavior. 
"totals": {"count_unique(user)": 0, "sum(session)": 16}, }, + { + "by": {"session.status": "unhandled"}, + "series": {"count_unique(user)": [0, 0, 3, 1, 0], "sum(session)": [0, 0, 0, 0, 0]}, + "totals": {"count_unique(user)": 4, "sum(session)": 0}, + }, ], } @@ -685,26 +702,30 @@ def test_clamping_in_massage_sessions_results_with_groupby_timeseries(): # snuba returns the datetimes as strings for now result_timeseries = [ { - "sessions": 7, - "sessions_errored": 3, - "sessions_crashed": 2, + "bucketed_started": "2020-12-18T12:00:00+00:00", "sessions_abnormal": 2, - "users": 7, - "users_errored": 3, - "users_crashed": 2, + "sessions_crashed": 2, + "sessions_errored": 3, + "sessions_unhandled": 0, + "sessions": 7, "users_abnormal": 2, - "bucketed_started": "2020-12-18T12:00:00+00:00", + "users_crashed": 2, + "users_errored": 3, + "users_unhandled": 0, + "users": 7, }, { - "sessions": 5, - "sessions_errored": 10, - "sessions_crashed": 0, + "bucketed_started": "2020-12-18T06:00:00+00:00", "sessions_abnormal": 0, - "users": 5, - "users_errored": 10, - "users_crashed": 0, + "sessions_crashed": 0, + "sessions_errored": 10, + "sessions_unhandled": 0, + "sessions": 5, "users_abnormal": 0, - "bucketed_started": "2020-12-18T06:00:00+00:00", + "users_crashed": 0, + "users_errored": 10, + "users_unhandled": 0, + "users": 5, }, ] expected_result = { @@ -737,6 +758,11 @@ def test_clamping_in_massage_sessions_results_with_groupby_timeseries(): "series": {"count_unique(user)": [0, 0, 4], "sum(session)": [0, 0, 4]}, "totals": {"count_unique(user)": 0, "sum(session)": 0}, }, + { + "by": {"session.status": "unhandled"}, + "series": {"count_unique(user)": [0, 0, 0], "sum(session)": [0, 0, 0]}, + "totals": {"count_unique(user)": 0, "sum(session)": 0}, + }, ], }