From 6b40f95033c036b3763739274dfff217720b70b0 Mon Sep 17 00:00:00 2001
From: Michael Derynck
Date: Fri, 3 Jun 2022 08:09:47 -0600
Subject: [PATCH] World, meet OnCall!
Co-authored-by: Eve832
Co-authored-by: Francisco Montes de Oca
Co-authored-by: Ildar Iskhakov
Co-authored-by: Innokentii Konstantinov
Co-authored-by: Julia
Co-authored-by: maskin25
Co-authored-by: Matias Bordese
Co-authored-by: Matvey Kukuy
Co-authored-by: Michael Derynck
Co-authored-by: Richard Hartmann
Co-authored-by: Robby Milo
Co-authored-by: Timur Olzhabayev
Co-authored-by: Vadim Stepanov
Co-authored-by: Yulia Shanyrova
---
.dockerignore | 8 +
.drone.yml | 230 +
.env.example | 31 +
.github/issue_and_pr_commands.json | 10 +
.github/workflows/backend-ci.yml | 39 +
.github/workflows/frontend-ci.yml | 21 +
.github/workflows/issue_commands.yml | 23 +
.github/workflows/publish_docs.yml | 40 +
.gitignore | 47 +
.pre-commit-config.yaml | 46 +
CHANGELOG.md | 3 +
DEVELOPER.md | 385 +
LICENSE | 661 +
LICENSING.md | 17 +
README.md | 76 +
SECURITY.md | 20 +
developer-docker-compose.yml | 69 +
docs/Makefile | 11 +
docs/README.md | 8 +
docs/sources/_index.md | 16 +
docs/sources/calendar-schedules/_index.md | 15 +
.../calendar-schedules/about-calendars.md | 13 +
.../calendar-schedules/create-calendar.md | 44 +
docs/sources/chat-options/_index.md | 13 +
docs/sources/chat-options/configure-slack.md | 33 +
.../chat-options/configure-telegram.md | 39 +
docs/sources/configure-notifications.md | 121 +
docs/sources/configure-user-settings.md | 48 +
docs/sources/escalation-policies/_index.md | 13 +
.../about-escalation-policies.md | 22 +
.../configure-escalation-policies.md | 42 +
docs/sources/integrations/_index.md | 11 +
docs/sources/integrations/add-alertmanager.md | 58 +
.../integrations/add-grafana-alerting.md | 60 +
docs/sources/integrations/add-integration.md | 22 +
docs/sources/integrations/add-zabbix.md | 137 +
docs/sources/integrations/webhooks/_index.md | 12 +
.../webhooks/add-webhook-integration.md | 39 +
.../webhooks/configure-outgoing-webhooks.md | 39 +
.../webhooks/create-custom-templates.md | 149 +
docs/sources/manage-alert-groups.md | 14 +
docs/sources/oncall-api-reference/_index.md | 63 +
.../oncall-api-reference/alertgroups.md | 68 +
docs/sources/oncall-api-reference/alerts.md | 110 +
.../oncall-api-reference/escalation_chains.md | 102 +
.../escalation_policies.md | 133 +
.../oncall-api-reference/integrations.md | 288 +
.../oncall-api-reference/on_call_shifts.md | 220 +
.../oncall-api-reference/outgoing_webhooks.md | 38 +
.../personal_notification_rules.md | 142 +
.../postmortem_messages.md | 141 +
.../oncall-api-reference/postmortems.md | 152 +
docs/sources/oncall-api-reference/routes.md | 188 +
.../sources/oncall-api-reference/schedules.md | 206 +
.../oncall-api-reference/slack_channels.md | 38 +
.../oncall-api-reference/user_groups.md | 46 +
docs/sources/oncall-api-reference/users.md | 91 +
engine/Dockerfile | 20 +
engine/Dockerfile.all-in-one | 38 +
engine/apps/__init__.py | 0
engine/apps/alerts/__init__.py | 0
engine/apps/alerts/admin.py | 71 +
engine/apps/alerts/constants.py | 12 +
.../alerts/escalation_snapshot/__init__.py | 1 +
.../escalation_snapshot_mixin.py | 272 +
.../serializers/__init__.py | 4 +
.../serializers/channel_filter_snapshot.py | 23 +
.../serializers/escalation_chain_snapshot.py | 14 +
.../serializers/escalation_policy_snapshot.py | 85 +
.../serializers/escalation_snapshot.py | 28 +
.../snapshot_classes/__init__.py | 4 +
.../channel_filter_snapshot.py | 15 +
.../escalation_chain_snapshot.py | 12 +
.../escalation_policy_snapshot.py | 485 +
.../snapshot_classes/escalation_snapshot.py | 131 +
.../apps/alerts/escalation_snapshot/utils.py | 29 +
.../grafana_alerting_sync_manager/__init__.py | 0
.../grafana_alerting_sync.py | 421 +
.../alerts/incident_appearance/__init__.py | 0
.../incident_appearance/renderers/__init__.py | 0
.../renderers/base_renderer.py | 28 +
.../renderers/constants.py | 1 +
.../renderers/email_renderer.py | 42 +
.../renderers/phone_call_renderer.py | 33 +
.../renderers/slack_renderer.py | 391 +
.../renderers/sms_renderer.py | 32 +
.../renderers/telegram_renderer.py | 64 +
.../renderers/web_renderer.py | 34 +
.../templaters/__init__.py | 7 +
.../templaters/alert_templater.py | 183 +
.../templaters/email_templater.py | 18 +
.../templaters/phone_call_templater.py | 31 +
.../templaters/slack_templater.py | 14 +
.../templaters/sms_templater.py | 27 +
.../templaters/telegram_templater.py | 29 +
.../templaters/web_templater.py | 36 +
.../alerts/incident_log_builder/__init__.py | 1 +
.../incident_log_builder.py | 682 +
.../apps/alerts/integration_options_mixin.py | 80 +
.../migrations/0001_squashed_initial.py | 276 +
.../migrations/0002_squashed_initial.py | 310 +
...03_squashed_create_demo_token_instances.py | 178 +
engine/apps/alerts/migrations/__init__.py | 0
engine/apps/alerts/models/__init__.py | 15 +
engine/apps/alerts/models/alert.py | 273 +
engine/apps/alerts/models/alert_group.py | 1616 ++
.../apps/alerts/models/alert_group_counter.py | 30 +
.../alerts/models/alert_group_log_record.py | 560 +
.../alerts/models/alert_manager_models.py | 64 +
.../alerts/models/alert_receive_channel.py | 716 +
engine/apps/alerts/models/channel_filter.py | 186 +
engine/apps/alerts/models/custom_button.py | 184 +
engine/apps/alerts/models/escalation_chain.py | 70 +
.../apps/alerts/models/escalation_policy.py | 361 +
.../models/grafana_alerting_contact_point.py | 22 +
engine/apps/alerts/models/invitation.py | 123 +
.../apps/alerts/models/maintainable_object.py | 188 +
engine/apps/alerts/models/resolution_note.py | 208 +
.../alerts/models/user_has_notification.py | 18 +
engine/apps/alerts/representative.py | 23 +
engine/apps/alerts/signals.py | 59 +
engine/apps/alerts/tasks/__init__.py | 27 +
.../apps/alerts/tasks/acknowledge_reminder.py | 137 +
.../alerts/tasks/cache_alert_group_for_web.py | 54 +
.../calculcate_escalation_finish_time.py | 15 +
engine/apps/alerts/tasks/call_ack_url.py | 49 +
.../alerts/tasks/check_escalation_finished.py | 48 +
.../apps/alerts/tasks/compare_escalations.py | 4 +
.../create_contact_points_for_datasource.py | 44 +
.../apps/alerts/tasks/custom_button_result.py | 89 +
.../apps/alerts/tasks/delete_alert_group.py | 26 +
engine/apps/alerts/tasks/distribute_alert.py | 50 +
.../apps/alerts/tasks/escalate_alert_group.py | 102 +
.../invalidate_web_cache_for_alert_group.py | 32 +
.../tasks/invite_user_to_join_incident.py | 71 +
engine/apps/alerts/tasks/maintenance.py | 138 +
engine/apps/alerts/tasks/notify_all.py | 96 +
engine/apps/alerts/tasks/notify_group.py | 122 +
.../tasks/notify_ical_schedule_shift.py | 403 +
engine/apps/alerts/tasks/notify_user.py | 417 +
...resolve_alert_group_by_source_if_needed.py | 28 +
.../tasks/resolve_alert_group_if_needed.py | 31 +
.../apps/alerts/tasks/resolve_by_last_step.py | 13 +
.../alerts/tasks/send_alert_group_signal.py | 17 +
.../tasks/send_update_log_report_signal.py | 24 +
.../tasks/send_update_postmortem_signal.py | 29 +
.../send_update_resolution_note_signal.py | 27 +
.../sync_grafana_alerting_contact_points.py | 13 +
engine/apps/alerts/tasks/task_logger.py | 6 +
engine/apps/alerts/tasks/unsilence.py | 48 +
engine/apps/alerts/tasks/wipe.py | 15 +
.../alerts/terraform_renderer/__init__.py | 2 +
.../terraform_file_renderer.py | 782 +
.../terraform_state_renderer.py | 122 +
engine/apps/alerts/tests/__init__.py | 0
engine/apps/alerts/tests/conftest.py | 11 +
engine/apps/alerts/tests/factories.py | 84 +
engine/apps/alerts/tests/test_alert_group.py | 97 +
.../alerts/tests/test_alert_group_renderer.py | 145 +
.../apps/alerts/tests/test_alert_manager.py | 135 +
.../tests/test_alert_receiver_channel.py | 149 +
.../apps/alerts/tests/test_channel_filter.py | 101 +
.../test_check_escalation_finished_task.py | 45 +
.../apps/alerts/tests/test_custom_button.py | 63 +
.../alerts/tests/test_default_templates.py | 108 +
.../alerts/tests/test_escalation_chain.py | 55 +
.../tests/test_escalation_policy_snapshot.py | 516 +
.../alerts/tests/test_escalation_snapshot.py | 228 +
.../alerts/tests/test_incident_log_builder.py | 39 +
engine/apps/alerts/tests/test_maintenance.py | 220 +
.../tests/test_notify_ical_schedule_shift.py | 63 +
engine/apps/alerts/tests/test_notify_user.py | 120 +
.../apps/alerts/tests/test_representative.py | 45 +
engine/apps/alerts/tests/test_silence.py | 71 +
.../alerts/tests/test_terraform_renderer.py | 147 +
engine/apps/alerts/tests/test_utils.py | 14 +
engine/apps/alerts/tests/test_wipe.py | 20 +
engine/apps/alerts/utils.py | 91 +
engine/apps/api/__init__.py | 0
engine/apps/api/permissions/__init__.py | 5 +
engine/apps/api/permissions/actions.py | 27 +
engine/apps/api/permissions/constants.py | 14 +
engine/apps/api/permissions/methods.py | 12 +
engine/apps/api/permissions/owner.py | 24 +
engine/apps/api/permissions/roles.py | 49 +
engine/apps/api/response_renderers.py | 13 +
engine/apps/api/serializers/__init__.py | 0
engine/apps/api/serializers/alert.py | 22 +
engine/apps/api/serializers/alert_group.py | 195 +
.../api/serializers/alert_receive_channel.py | 656 +
engine/apps/api/serializers/channel_filter.py | 186 +
engine/apps/api/serializers/custom_button.py | 68 +
.../api/serializers/custom_serializers.py | 14 +
.../apps/api/serializers/escalation_chain.py | 31 +
.../apps/api/serializers/escalation_policy.py | 231 +
.../api/serializers/integration_heartbeat.py | 69 +
engine/apps/api/serializers/live_setting.py | 37 +
engine/apps/api/serializers/organization.py | 175 +
.../serializers/organization_log_record.py | 38 +
.../organization_slack_settings.py | 20 +
.../apps/api/serializers/public_api_token.py | 13 +
.../apps/api/serializers/resolution_note.py | 64 +
engine/apps/api/serializers/schedule_base.py | 85 +
.../apps/api/serializers/schedule_calendar.py | 57 +
engine/apps/api/serializers/schedule_ical.py | 83 +
.../api/serializers/schedule_polymorphic.py | 42 +
.../apps/api/serializers/schedule_reminder.py | 15 +
engine/apps/api/serializers/slack_channel.py | 13 +
.../api/serializers/slack_user_identity.py | 18 +
engine/apps/api/serializers/team.py | 16 +
engine/apps/api/serializers/telegram.py | 24 +
engine/apps/api/serializers/user.py | 136 +
engine/apps/api/serializers/user_group.py | 11 +
.../serializers/user_notification_policy.py | 134 +
engine/apps/api/tasks.py | 55 +
engine/apps/api/tests/__init__.py | 0
engine/apps/api/tests/conftest.py | 68 +
engine/apps/api/tests/test_alert_group.py | 1471 ++
.../api/tests/test_alert_receive_channel.py | 662 +
.../test_alert_receive_channel_template.py | 269 +
engine/apps/api/tests/test_channel_filter.py | 488 +
engine/apps/api/tests/test_custom_button.py | 379 +
.../apps/api/tests/test_escalation_chain.py | 39 +
.../apps/api/tests/test_escalation_policy.py | 866 +
engine/apps/api/tests/test_features.py | 59 +
engine/apps/api/tests/test_gitops.py | 56 +
.../api/tests/test_integration_heartbeat.py | 286 +
engine/apps/api/tests/test_live_settings.py | 100 +
engine/apps/api/tests/test_maintenance.py | 172 +
engine/apps/api/tests/test_organization.py | 190 +
.../api/tests/test_organization_log_record.py | 243 +
.../api/tests/test_postmortem_messages.py | 395 +
.../tests/test_preview_template_options.py | 18 +
.../api/tests/test_route_regex_debugger.py | 28 +
engine/apps/api/tests/test_schedule_export.py | 155 +
engine/apps/api/tests/test_schedules.py | 667 +
.../api/tests/test_set_general_log_channel.py | 32 +
engine/apps/api/tests/test_slack_channels.py | 64 +
.../api/tests/test_slack_team_settings.py | 181 +
engine/apps/api/tests/test_subscription.py | 39 +
engine/apps/api/tests/test_team.py | 87 +
.../apps/api/tests/test_telegram_channel.py | 266 +
.../apps/api/tests/test_terraform_renderer.py | 25 +
engine/apps/api/tests/test_user.py | 1484 ++
engine/apps/api/tests/test_user_groups.py | 70 +
.../tests/test_user_notification_policy.py | 471 +
.../api/tests/test_user_schedule_export.py | 230 +
engine/apps/api/throttlers/__init__.py | 1 +
.../api/throttlers/demo_alert_throttler.py | 6 +
engine/apps/api/urls.py | 118 +
engine/apps/api/views/__init__.py | 1 +
engine/apps/api/views/alert_group.py | 571 +
.../apps/api/views/alert_receive_channel.py | 206 +
.../views/alert_receive_channel_template.py | 52 +
engine/apps/api/views/apns_device.py | 7 +
engine/apps/api/views/auth.py | 50 +
engine/apps/api/views/channel_filter.py | 140 +
engine/apps/api/views/custom_button.py | 94 +
engine/apps/api/views/escalation_chain.py | 136 +
engine/apps/api/views/escalation_policy.py | 171 +
engine/apps/api/views/features.py | 51 +
engine/apps/api/views/gitops.py | 33 +
.../apps/api/views/integration_heartbeat.py | 78 +
engine/apps/api/views/live_setting.py | 88 +
engine/apps/api/views/maintenance.py | 117 +
engine/apps/api/views/organization.py | 93 +
.../apps/api/views/organization_log_record.py | 128 +
.../api/views/preview_template_options.py | 19 +
engine/apps/api/views/public_api_tokens.py | 55 +
engine/apps/api/views/resolution_note.py | 54 +
engine/apps/api/views/route_regex_debugger.py | 59 +
engine/apps/api/views/schedule.py | 333 +
engine/apps/api/views/slack_channel.py | 32 +
engine/apps/api/views/slack_team_settings.py | 69 +
engine/apps/api/views/subscription.py | 16 +
engine/apps/api/views/team.py | 24 +
engine/apps/api/views/telegram_channels.py | 48 +
engine/apps/api/views/user.py | 486 +
engine/apps/api/views/user_group.py | 23 +
.../api/views/user_notification_policy.py | 206 +
.../apps/api_for_grafana_incident/__init__.py | 0
engine/apps/api_for_grafana_incident/apps.py | 6 +
.../api_for_grafana_incident/serializers.py | 26 +
engine/apps/api_for_grafana_incident/urls.py | 17 +
engine/apps/api_for_grafana_incident/views.py | 13 +
engine/apps/auth_token/__init__.py | 0
engine/apps/auth_token/auth.py | 246 +
engine/apps/auth_token/constants.py | 12 +
engine/apps/auth_token/crypto.py | 45 +
engine/apps/auth_token/exceptions.py | 2 +
.../migrations/0001_squashed_initial.py | 107 +
.../migrations/0002_squashed_initial.py | 96 +
...03_squashed_create_demo_token_instances.py | 40 +
engine/apps/auth_token/migrations/__init__.py | 0
engine/apps/auth_token/models/__init__.py | 6 +
.../apps/auth_token/models/api_auth_token.py | 32 +
.../apps/auth_token/models/base_auth_token.py | 44 +
.../models/mobile_app_auth_token.py | 29 +
.../models/mobile_app_verification_token.py | 48 +
.../auth_token/models/plugin_auth_token.py | 53 +
.../models/schedule_export_auth_token.py | 45 +
.../auth_token/models/slack_auth_token.py | 48 +
.../models/user_schedule_export_auth_token.py | 36 +
engine/apps/auth_token/tests/__init__.py | 0
engine/apps/auth_token/tests/test_crypto.py | 23 +
engine/apps/base/__init__.py | 0
engine/apps/base/admin.py | 24 +
engine/apps/base/constants.py | 23 +
engine/apps/base/messaging.py | 73 +
.../base/migrations/0001_squashed_initial.py | 89 +
.../base/migrations/0002_squashed_initial.py | 46 +
...03_squashed_create_demo_token_instances.py | 74 +
engine/apps/base/migrations/__init__.py | 0
engine/apps/base/models/__init__.py | 6 +
engine/apps/base/models/dynamic_setting.py | 39 +
.../models/failed_to_invoke_celery_task.py | 18 +
engine/apps/base/models/live_setting.py | 174 +
.../base/models/organization_log_record.py | 317 +
.../base/models/user_notification_policy.py | 293 +
.../user_notification_policy_log_record.py | 318 +
engine/apps/base/tasks.py | 30 +
engine/apps/base/tests/__init__.py | 0
engine/apps/base/tests/factories.py | 25 +
engine/apps/base/tests/messaging_backend.py | 27 +
engine/apps/base/tests/test_live_settings.py | 72 +
engine/apps/base/tests/test_messaging.py | 19 +
.../tests/test_organization_log_record.py | 18 +
.../tests/test_user_notification_policy.py | 82 +
...est_user_notification_policy_log_record.py | 63 +
engine/apps/base/utils.py | 125 +
engine/apps/grafana_plugin/__init__.py | 0
.../apps/grafana_plugin/helpers/__init__.py | 2 +
engine/apps/grafana_plugin/helpers/client.py | 139 +
engine/apps/grafana_plugin/helpers/gcom.py | 103 +
engine/apps/grafana_plugin/permissions.py | 50 +
engine/apps/grafana_plugin/tasks/__init__.py | 1 +
engine/apps/grafana_plugin/tasks/sync.py | 78 +
engine/apps/grafana_plugin/tests/test_sync.py | 100 +
engine/apps/grafana_plugin/urls.py | 19 +
engine/apps/grafana_plugin/views/__init__.py | 5 +
engine/apps/grafana_plugin/views/install.py | 24 +
.../views/self_hosted_install.py | 55 +
engine/apps/grafana_plugin/views/status.py | 52 +
engine/apps/grafana_plugin/views/sync.py | 73 +
.../grafana_plugin/views/sync_organization.py | 25 +
.../grafana_plugin_management/__init__.py | 0
engine/apps/grafana_plugin_management/urls.py | 13 +
.../views/__init__.py | 1 +
.../views/plugin_installations.py | 64 +
engine/apps/heartbeat/__init__.py | 0
engine/apps/heartbeat/admin.py | 5 +
.../migrations/0001_squashed_initial.py | 55 +
engine/apps/heartbeat/migrations/__init__.py | 0
engine/apps/heartbeat/models.py | 244 +
engine/apps/heartbeat/tasks.py | 80 +
engine/apps/heartbeat/tests/__init__.py | 0
engine/apps/heartbeat/tests/factories.py | 10 +
.../tests/test_integration_heartbeat.py | 86 +
engine/apps/integrations/__init__.py | 0
engine/apps/integrations/metadata/__init__.py | 0
.../metadata/configuration/alertmanager.py | 254 +
.../metadata/configuration/amazon_sns.py | 99 +
.../configuration/formatted_webhook.py | 62 +
.../metadata/configuration/grafana.py | 287 +
.../configuration/grafana_alerting.py | 256 +
.../metadata/configuration/heartbeat.py | 31 +
.../metadata/configuration/inbound_email.py | 53 +
.../metadata/configuration/maintenance.py | 53 +
.../metadata/configuration/manual.py | 77 +
.../metadata/configuration/slack_channel.py | 44 +
.../metadata/configuration/webhook.py | 65 +
.../metadata/heartbeat/__init__.py | 13 +
.../heartbeat/_heartbeat_text_creator.py | 66 +
.../metadata/heartbeat/alertmanager.py | 35 +
.../metadata/heartbeat/elastalert.py | 37 +
.../metadata/heartbeat/formatted_webhook.py | 37 +
.../metadata/heartbeat/grafana.py | 31 +
.../integrations/metadata/heartbeat/prtg.py | 37 +
.../metadata/heartbeat/webhook.py | 38 +
.../integrations/metadata/heartbeat/zabbix.py | 37 +
engine/apps/integrations/mixins/__init__.py | 7 +
.../mixins/alert_channel_defining_mixin.py | 81 +
.../mixins/browsable_instruction_mixin.py | 35 +
.../integrations/mixins/ratelimit_mixin.py | 229 +
engine/apps/integrations/tasks.py | 166 +
.../heartbeat_instructions/alertmanager.html | 41 +
.../heartbeat_instructions/elastalert.html | 11 +
.../formatted_webhook.html | 16 +
.../heartbeat_instructions/grafana.html | 37 +
.../heartbeat_instructions/prtg.html | 3 +
.../heartbeat_instructions/webhook.html | 16 +
.../heartbeat_instructions/zabbix.html | 33 +
.../templates/heartbeat_link.html | 31 +
.../html/integration_alertmanager.html | 33 +
.../html/integration_amazon_sns.html | 28 +
.../templates/html/integration_curler.html | 11 +
.../templates/html/integration_datadog.html | 33 +
.../templates/html/integration_demo.html | 1 +
.../html/integration_elastalert.html | 22 +
.../templates/html/integration_fabric.html | 26 +
.../html/integration_formatted_webhook.html | 47 +
.../templates/html/integration_grafana.html | 41 +
.../html/integration_grafana_alerting.html | 62 +
.../templates/html/integration_heartbeat.html | 4 +
.../html/integration_inbound_email.html | 15 +
.../templates/html/integration_kapacitor.html | 22 +
.../templates/html/integration_manual.html | 7 +
.../templates/html/integration_newrelic.html | 27 +
.../templates/html/integration_pagerduty.html | 22 +
.../templates/html/integration_pingdom.html | 27 +
.../templates/html/integration_prtg.html | 98 +
.../templates/html/integration_sentry.html | 22 +
.../html/integration_sentry_platform.html | 6 +
.../html/integration_slack_channel.html | 5 +
.../html/integration_stackdriver.html | 26 +
.../html/integration_uptimerobot.html | 51 +
.../templates/html/integration_webhook.html | 36 +
.../templates/html/integration_zabbix.html | 72 +
.../templates/integration_link.html | 83 +
engine/apps/integrations/tests/__init__.py | 0
.../tests/test_heartbeat_metadata.py | 18 +
.../apps/integrations/tests/test_ratelimit.py | 99 +
engine/apps/integrations/tests/test_tasks.py | 31 +
engine/apps/integrations/tests/test_views.py | 46 +
engine/apps/integrations/urls.py | 46 +
engine/apps/integrations/views.py | 467 +
engine/apps/migration_tool/__init__.py | 0
engine/apps/migration_tool/constants.py | 7 +
.../migrations/0001_squashed_initial.py | 33 +
...2_amixrmigrationtaskstatus_organization.py | 22 +
.../migration_tool/migrations/__init__.py | 0
engine/apps/migration_tool/models/__init__.py | 2 +
.../models/amixr_migration_task_status.py | 27 +
.../migration_tool/models/locked_alert.py | 5 +
engine/apps/migration_tool/tasks.py | 612 +
engine/apps/migration_tool/urls.py | 12 +
engine/apps/migration_tool/utils.py | 35 +
engine/apps/migration_tool/views/__init__.py | 0
.../views/customers_migration_tool.py | 186 +
engine/apps/oss_installation/__init__.py | 0
.../migrations/0001_squashed_initial.py | 33 +
.../oss_installation/migrations/__init__.py | 0
.../apps/oss_installation/models/__init__.py | 2 +
.../apps/oss_installation/models/heartbeat.py | 29 +
.../models/oss_installation.py | 9 +
engine/apps/oss_installation/tasks.py | 95 +
engine/apps/oss_installation/urls.py | 7 +
engine/apps/oss_installation/usage_stats.py | 50 +
engine/apps/oss_installation/utils.py | 70 +
.../apps/oss_installation/views/__init__.py | 1 +
.../views/cloud_heartbeat_status.py | 15 +
engine/apps/public_api/__init__.py | 0
engine/apps/public_api/constants.py | 69 +
engine/apps/public_api/custom_renderers.py | 19 +
engine/apps/public_api/helpers.py | 25 +
.../apps/public_api/serializers/__init__.py | 15 +
engine/apps/public_api/serializers/action.py | 17 +
engine/apps/public_api/serializers/alerts.py | 21 +
.../serializers/escalation_chains.py | 24 +
.../serializers/escalation_policies.py | 306 +
.../apps/public_api/serializers/incidents.py | 47 +
.../public_api/serializers/integrations.py | 191 +
.../serializers/integtration_heartbeat.py | 11 +
.../public_api/serializers/maintenance.py | 36 +
.../public_api/serializers/on_call_shifts.py | 325 +
.../public_api/serializers/organizations.py | 18 +
.../personal_notification_rules.py | 155 +
.../serializers/resolution_notes.py | 53 +
engine/apps/public_api/serializers/routes.py | 187 +
.../public_api/serializers/schedules_base.py | 92 +
.../serializers/schedules_calendar.py | 106 +
.../public_api/serializers/schedules_ical.py | 73 +
.../serializers/schedules_polymorphic.py | 47 +
.../public_api/serializers/slack_channel.py | 9 +
engine/apps/public_api/serializers/teams.py | 11 +
.../public_api/serializers/user_groups.py | 27 +
engine/apps/public_api/serializers/users.py | 60 +
engine/apps/public_api/tests/__init__.py | 0
engine/apps/public_api/tests/conftest.py | 243 +
engine/apps/public_api/tests/test_alerts.py | 172 +
.../public_api/tests/test_custom_actions.py | 89 +
.../tests/test_demo_token/__init__.py | 0
.../tests/test_demo_token/test_alerts.py | 110 +
.../test_demo_token/test_custom_actions.py | 32 +
.../test_escalation_policies.py | 169 +
.../tests/test_demo_token/test_incidents.py | 82 +
.../test_demo_token/test_integrations.py | 239 +
.../test_demo_token/test_on_call_shift.py | 172 +
.../test_personal_notification_rules.py | 225 +
.../test_demo_token/test_resolution_notes.py | 117 +
.../tests/test_demo_token/test_routes.py | 182 +
.../tests/test_demo_token/test_schedules.py | 164 +
.../test_demo_token/test_slack_channels.py | 34 +
.../tests/test_demo_token/test_user_groups.py | 36 +
.../tests/test_demo_token/test_users.py | 91 +
.../public_api/tests/test_escalation_chain.py | 75 +
.../tests/test_escalation_policies.py | 221 +
.../apps/public_api/tests/test_incidents.py | 196 +
.../public_api/tests/test_integrations.py | 491 +
.../apps/public_api/tests/test_maintenance.py | 48 +
.../public_api/tests/test_on_call_shifts.py | 239 +
.../tests/test_personal_notification_rules.py | 323 +
.../apps/public_api/tests/test_ratelimit.py | 33 +
.../public_api/tests/test_resolution_notes.py | 221 +
engine/apps/public_api/tests/test_routes.py | 226 +
.../public_api/tests/test_schedule_export.py | 75 +
.../apps/public_api/tests/test_schedules.py | 526 +
.../public_api/tests/test_slack_channels.py | 38 +
engine/apps/public_api/tests/test_teams.py | 65 +
.../apps/public_api/tests/test_user_groups.py | 104 +
engine/apps/public_api/tests/test_users.py | 142 +
engine/apps/public_api/throttlers/__init__.py | 0
.../public_api/throttlers/user_throttle.py | 43 +
engine/apps/public_api/urls.py | 33 +
engine/apps/public_api/views/__init__.py | 17 +
engine/apps/public_api/views/action.py | 34 +
engine/apps/public_api/views/alerts.py | 44 +
.../public_api/views/escalation_chains.py | 85 +
.../public_api/views/escalation_policies.py | 88 +
engine/apps/public_api/views/incidents.py | 98 +
engine/apps/public_api/views/info.py | 17 +
engine/apps/public_api/views/integrations.py | 82 +
.../views/maintaiable_object_mixin.py | 48 +
.../apps/public_api/views/on_call_shifts.py | 82 +
engine/apps/public_api/views/organizations.py | 42 +
.../views/personal_notifications.py | 119 +
.../apps/public_api/views/resolution_notes.py | 67 +
engine/apps/public_api/views/routes.py | 105 +
engine/apps/public_api/views/schedules.py | 111 +
.../apps/public_api/views/slack_channels.py | 32 +
engine/apps/public_api/views/teams.py | 27 +
engine/apps/public_api/views/user_groups.py | 29 +
engine/apps/public_api/views/users.py | 71 +
engine/apps/schedules/__init__.py | 0
engine/apps/schedules/admin.py | 9 +
engine/apps/schedules/ical_events/__init__.py | 5 +
.../schedules/ical_events/adapter/__init__.py | 0
.../amixr_recurring_ical_events_adapter.py | 88 +
.../adapter/recurring_ical_events_adapter.py | 17 +
.../schedules/ical_events/proxy/__init__.py | 0
.../schedules/ical_events/proxy/ical_proxy.py | 19 +
engine/apps/schedules/ical_utils.py | 696 +
.../migrations/0001_squashed_initial.py | 94 +
.../migrations/0002_squashed_initial.py | 67 +
engine/apps/schedules/migrations/__init__.py | 0
engine/apps/schedules/models/__init__.py | 2 +
.../schedules/models/custom_on_call_shift.py | 338 +
.../apps/schedules/models/on_call_schedule.py | 395 +
engine/apps/schedules/tasks/__init__.py | 16 +
.../apps/schedules/tasks/drop_cached_ical.py | 30 +
.../notify_about_empty_shifts_in_schedule.py | 140 +
.../tasks/notify_about_gaps_in_schedule.py | 123 +
.../schedules/tasks/refresh_ical_files.py | 63 +
engine/apps/schedules/tests/__init__.py | 0
.../calendars/calendar_with_all_day_event.ics | 62 +
.../calendar_with_edited_recurring_events.ics | 48 +
.../calendar_with_recurring_event.ics | 33 +
engine/apps/schedules/tests/conftest.py | 17 +
engine/apps/schedules/tests/factories.py | 32 +
.../tests/test_amixr_users_in_ical.py | 103 +
.../tests/test_custom_on_call_shift.py | 201 +
.../apps/schedules/tests/test_ical_proxy.py | 52 +
engine/apps/sendgridapp/__init__.py | 0
engine/apps/sendgridapp/constants.py | 49 +
.../apps/sendgridapp/migrations/__init__.py | 0
engine/apps/sendgridapp/models.py | 185 +
engine/apps/sendgridapp/parse.py | 119 +
engine/apps/sendgridapp/permissions.py | 14 +
.../templates/email_notification.html | 26 +
.../templates/email_verification.html | 15 +
engine/apps/sendgridapp/tests/__init__.py | 0
engine/apps/sendgridapp/tests/factories.py | 8 +
engine/apps/sendgridapp/tests/test_emails.py | 135 +
engine/apps/sendgridapp/urls.py | 9 +
engine/apps/sendgridapp/verification_token.py | 20 +
engine/apps/sendgridapp/views.py | 29 +
engine/apps/slack/__init__.py | 0
engine/apps/slack/admin.py | 31 +
engine/apps/slack/constants.py | 11 +
.../slack/migrations/0001_squashed_initial.py | 132 +
.../slack/migrations/0002_squashed_initial.py | 53 +
...03_squashed_create_demo_token_instances.py | 47 +
engine/apps/slack/migrations/__init__.py | 0
engine/apps/slack/models/__init__.py | 6 +
.../apps/slack/models/slack_action_record.py | 29 +
engine/apps/slack/models/slack_channel.py | 45 +
engine/apps/slack/models/slack_message.py | 237 +
.../apps/slack/models/slack_team_identity.py | 160 +
.../apps/slack/models/slack_user_identity.py | 227 +
engine/apps/slack/models/slack_usergroup.py | 177 +
engine/apps/slack/representatives/__init__.py | 0
.../alert_group_representative.py | 299 +
.../representatives/user_representative.py | 47 +
engine/apps/slack/scenarios/__init__.py | 0
.../slack/scenarios/alertgroup_appearance.py | 357 +
.../apps/slack/scenarios/distribute_alerts.py | 1333 ++
.../slack/scenarios/escalation_delivery.py | 47 +
.../slack/scenarios/notification_delivery.py | 96 +
engine/apps/slack/scenarios/onboarding.py | 43 +
engine/apps/slack/scenarios/profile_update.py | 55 +
engine/apps/slack/scenarios/public_menu.py | 536 +
.../apps/slack/scenarios/resolution_note.py | 607 +
engine/apps/slack/scenarios/scenario_step.py | 485 +
engine/apps/slack/scenarios/schedules.py | 330 +
engine/apps/slack/scenarios/slack_channel.py | 128 +
.../scenarios/slack_channel_integration.py | 193 +
engine/apps/slack/scenarios/slack_renderer.py | 51 +
.../apps/slack/scenarios/slack_usergroup.py | 88 +
engine/apps/slack/scenarios/step_mixins.py | 70 +
engine/apps/slack/slack_client/__init__.py | 1 +
engine/apps/slack/slack_client/exceptions.py | 22 +
.../apps/slack/slack_client/slack_client.py | 109 +
.../slack/slack_client/slack_client_server.py | 26 +
engine/apps/slack/slack_formatter.py | 99 +
engine/apps/slack/tasks.py | 781 +
.../slack_teams_summary_change_list.html | 76 +
engine/apps/slack/tests/__init__.py | 0
engine/apps/slack/tests/conftest.py | 12 +
engine/apps/slack/tests/factories.py | 59 +
.../slack/tests/test_create_message_blocks.py | 56 +
.../slack/tests/test_parse_slack_usernames.py | 56 +
.../tests/test_populate_slack_channels.py | 47 +
engine/apps/slack/tests/test_reset_slack.py | 34 +
.../tests/test_scenario_steps/__init__.py | 0
.../test_distribute_alerts.py | 35 +
.../test_resolution_note.py | 103 +
.../test_slack_usergroup_steps.py | 120 +
engine/apps/slack/tests/test_user_group.py | 69 +
engine/apps/slack/urls.py | 24 +
engine/apps/slack/utils.py | 66 +
engine/apps/slack/views.py | 545 +
engine/apps/social_auth/__init__.py | 0
engine/apps/social_auth/backends.py | 164 +
.../live_setting_django_strategy.py | 44 +
engine/apps/social_auth/middlewares.py | 28 +
engine/apps/social_auth/pipeline.py | 102 +
engine/apps/social_auth/urls.py | 11 +
engine/apps/social_auth/views.py | 50 +
engine/apps/telegram/__init__.py | 0
.../telegram/alert_group_representative.py | 134 +
engine/apps/telegram/apps.py | 8 +
engine/apps/telegram/client.py | 147 +
engine/apps/telegram/decorators.py | 86 +
.../migrations/0001_squashed_initial.py | 76 +
engine/apps/telegram/migrations/__init__.py | 0
engine/apps/telegram/models/__init__.py | 5 +
.../telegram/models/connectors/__init__.py | 0
.../telegram/models/connectors/channel.py | 133 +
.../telegram/models/connectors/personal.py | 166 +
engine/apps/telegram/models/message.py | 58 +
.../telegram/models/verification/__init__.py | 0
.../telegram/models/verification/channel.py | 73 +
.../telegram/models/verification/personal.py | 46 +
engine/apps/telegram/renderers/__init__.py | 0
engine/apps/telegram/renderers/keyboard.py | 89 +
engine/apps/telegram/renderers/message.py | 99 +
engine/apps/telegram/signals.py | 10 +
engine/apps/telegram/tasks.py | 191 +
engine/apps/telegram/tests/__init__.py | 0
engine/apps/telegram/tests/factories.py | 45 +
.../telegram/tests/test_keyboard_renderer.py | 138 +
.../telegram/tests/test_message_renderer.py | 208 +
engine/apps/telegram/tests/test_models.py | 24 +
.../telegram/tests/test_update_handlers.py | 123 +
engine/apps/telegram/updates/__init__.py | 0
.../updates/update_handlers/__init__.py | 6 +
.../updates/update_handlers/button_press.py | 97 +
.../channel_to_group_forward.py | 81 +
.../updates/update_handlers/start_message.py | 35 +
.../updates/update_handlers/update_handler.py | 21 +
.../update_handlers/verification/__init__.py | 0
.../update_handlers/verification/channel.py | 91 +
.../update_handlers/verification/personal.py | 48 +
.../apps/telegram/updates/update_manager.py | 76 +
engine/apps/telegram/urls.py | 7 +
engine/apps/telegram/utils.py | 20 +
engine/apps/telegram/views.py | 16 +
engine/apps/twilioapp/__init__.py | 0
engine/apps/twilioapp/admin.py | 17 +
engine/apps/twilioapp/constants.py | 108 +
.../migrations/0001_squashed_initial.py | 60 +
engine/apps/twilioapp/migrations/__init__.py | 0
engine/apps/twilioapp/models/__init__.py | 3 +
engine/apps/twilioapp/models/phone_call.py | 216 +
engine/apps/twilioapp/models/sms_message.py | 185 +
.../twilioapp/models/twilio_log_record.py | 26 +
engine/apps/twilioapp/phone_manager.py | 75 +
engine/apps/twilioapp/tests/__init__.py | 0
engine/apps/twilioapp/tests/factories.py | 13 +
.../apps/twilioapp/tests/test_phone_calls.py | 270 +
.../apps/twilioapp/tests/test_sms_message.py | 142 +
engine/apps/twilioapp/twilio_client.py | 179 +
engine/apps/twilioapp/urls.py | 12 +
engine/apps/twilioapp/utils.py | 67 +
engine/apps/twilioapp/views.py | 74 +
engine/apps/user_management/__init__.py | 0
engine/apps/user_management/admin.py | 20 +
.../migrations/0001_squashed_initial.py | 107 +
...02_squashed_create_demo_token_instances.py | 51 +
.../user_management/migrations/__init__.py | 0
.../apps/user_management/models/__init__.py | 3 +
.../user_management/models/organization.py | 266 +
engine/apps/user_management/models/team.py | 81 +
engine/apps/user_management/models/user.py | 248 +
.../organization_log_creator/__init__.py | 2 +
.../create_organization_log.py | 11 +
.../organization_log_type.py | 52 +
.../subscription_strategy/__init__.py | 1 +
.../base_subsription_strategy.py | 22 +
.../free_public_beta_subscription_strategy.py | 83 +
engine/apps/user_management/sync.py | 100 +
engine/apps/user_management/tests/__init__.py | 0
.../apps/user_management/tests/factories.py | 33 +
...t_free_public_beta_subcription_strategy.py | 84 +
.../tests/test_organization.py | 169 +
.../apps/user_management/tests/test_sync.py | 198 +
.../apps/user_management/tests/test_user.py | 24 +
.../user_management/user_representative.py | 16 +
engine/celery_with_exporter.sh | 40 +
engine/common/__init__.py | 0
engine/common/admin.py | 61 +
engine/common/api_helpers/__init__.py | 0
engine/common/api_helpers/custom_fields.py | 173 +
engine/common/api_helpers/exceptions.py | 25 +
engine/common/api_helpers/filters.py | 88 +
engine/common/api_helpers/mixins.py | 368 +
.../api_helpers/optional_slash_router.py | 21 +
engine/common/api_helpers/paginators.py | 13 +
engine/common/api_helpers/utils.py | 52 +
engine/common/constants/__init__.py | 0
engine/common/constants/role.py | 11 +
engine/common/constants/slack_auth.py | 5 +
engine/common/custom_celery_tasks/__init__.py | 1 +
.../create_alert_base_task.py | 8 +
.../dedicated_queue_retry_task.py | 30 +
.../safe_to_broker_outage_task.py | 21 +
engine/common/exceptions/__init__.py | 1 +
engine/common/exceptions/exceptions.py | 19 +
engine/common/jinja_templater/__init__.py | 2 +
.../jinja_templater/apply_jinja_template.py | 12 +
engine/common/jinja_templater/filters.py | 24 +
.../jinja_templater/jinja_template_env.py | 12 +
.../use_random_readonly_db_manager_mixin.py | 21 +
engine/common/public_primary_keys.py | 66 +
engine/common/tests/__init__.py | 0
engine/common/tests/test_clean_markup.py | 44 +
engine/common/tests/test_urlize.py | 25 +
engine/common/utils.py | 228 +
engine/conftest.py | 678 +
engine/engine/__init__.py | 7 +
engine/engine/celery.py | 43 +
engine/engine/logging/formatters.py | 18 +
.../commands/issue_invite_for_the_frontend.py | 45 +
.../management/commands/restart_escalation.py | 93 +
.../management/commands/start_celery.py | 22 +
engine/engine/middlewares.py | 93 +
engine/engine/parsers.py | 48 +
engine/engine/urls.py | 70 +
engine/engine/views.py | 70 +
engine/engine/wsgi.py | 18 +
engine/manage.py | 15 +
engine/pyproject.toml | 10 +
engine/requirements.txt | 41 +
engine/scripts/start_all_in_one.sh | 34 +
engine/settings/__init__.py | 0
engine/settings/all_in_one.py | 58 +
engine/settings/base.py | 438 +
engine/settings/ci-test.py | 29 +
engine/settings/dev.py | 92 +
engine/settings/prod_without_db.py | 191 +
.../heartbeat_grafana_1.png | Bin 0 -> 181505 bytes
.../heartbeat_grafana_2.png | Bin 0 -> 295624 bytes
.../heartbeat_grafana_3.png | Bin 0 -> 575712 bytes
.../heartbeat_grafana_4.png | Bin 0 -> 655152 bytes
.../heartbeat_grafana_5.png | Bin 0 -> 554303 bytes
.../heartbeat_zabbix_1.png | Bin 0 -> 120086 bytes
.../heartbeat_zabbix_2.png | Bin 0 -> 124180 bytes
.../heartbeat_zabbix_3.png | Bin 0 -> 133993 bytes
.../heartbeat_zabbix_4.png | Bin 0 -> 134825 bytes
.../heartbeat_zabbix_5.png | Bin 0 -> 157699 bytes
engine/static/images/postmortem.gif | Bin 0 -> 172114 bytes
engine/tox.ini | 12 +
engine/uwsgi.ini | 21 +
engine/wait_for_test_mysql_start.sh | 7 +
grafana-plugin/.eslintignore | 1 +
grafana-plugin/.eslintrc.js | 48 +
grafana-plugin/.gitignore | 33 +
grafana-plugin/.prettierrc.js | 3 +
grafana-plugin/.release-it.js | 21 +
grafana-plugin/.stylelintignore | 1 +
grafana-plugin/.stylelintrc | 11 +
grafana-plugin/CHANGELOG.md | 1 +
grafana-plugin/LICENSE | 109 +
.../e2e/features/add-channel-filter.feature | 16 +
.../delete-notification-steps.feature | 13 +
.../e2e/features/steps/addChannelFilter.js | 46 +
grafana-plugin/e2e/features/steps/common.js | 85 +
.../features/steps/deleteNotificationSteps.js | 51 +
grafana-plugin/e2e/features/support/world.js | 40 +
grafana-plugin/e2e/utils/takeScreenshot.js | 10 +
grafana-plugin/grafana-plugin.yml.example | 11 +
grafana-plugin/jest.config.js | 8 +
grafana-plugin/package.json | 83 +
grafana-plugin/plopfile.js | 40 +
grafana-plugin/provisioning/.gitignore | 2 +
grafana-plugin/src/GrafanaPluginRootPage.tsx | 135 +
grafana-plugin/src/README.md | 23 +
.../src/assets/fonts/nucleo/nucleo-icons.eot | Bin 0 -> 18516 bytes
.../src/assets/fonts/nucleo/nucleo-icons.svg | 312 +
.../src/assets/fonts/nucleo/nucleo-icons.ttf | Bin 0 -> 18292 bytes
.../src/assets/fonts/nucleo/nucleo-icons.woff | Bin 0 -> 10220 bytes
.../assets/fonts/nucleo/nucleo-icons.woff2 | Bin 0 -> 8580 bytes
grafana-plugin/src/assets/img/ElastAlert.svg | 1 +
.../src/assets/img/HeartBeatMonitoring.png | Bin 0 -> 20686 bytes
grafana-plugin/src/assets/img/PagerDuty.png | Bin 0 -> 5101 bytes
.../src/assets/img/arrows/arrow_07.svg | 1 +
.../src/assets/img/arrows/arrow_12.svg | 1 +
.../src/assets/img/arrows/arrow_35.svg | 1 +
.../src/assets/img/arrows/arrow_43.svg | 1 +
.../src/assets/img/brand/amixr-logo-blue.png | Bin 0 -> 2469 bytes
.../src/assets/img/brand/amixr-logo-blue.svg | 9 +
.../src/assets/img/brand/amixr-logo.png | Bin 0 -> 6354 bytes
.../assets/img/brand/argon-react-white.png | Bin 0 -> 27130 bytes
.../src/assets/img/brand/argon-react.png | Bin 0 -> 27505 bytes
grafana-plugin/src/assets/img/brand/blue.png | Bin 0 -> 7883 bytes
.../src/assets/img/brand/favicon.png | Bin 0 -> 4976 bytes
grafana-plugin/src/assets/img/brand/white.png | Bin 0 -> 7714 bytes
.../src/assets/img/events_instructions.png | Bin 0 -> 13698 bytes
.../src/assets/img/grafana_icon.svg | 57 +
grafana-plugin/src/assets/img/howto-phone.png | Bin 0 -> 181907 bytes
.../src/assets/img/icons/common/avatar.svg | 6 +
.../src/assets/img/icons/common/github.svg | 12 +
.../src/assets/img/icons/common/google.svg | 17 +
.../src/assets/img/icons/common/slack.svg | 10 +
.../src/assets/img/icons/curler-icon.svg | 11 +
.../src/assets/img/icons/slack-colored.svg | 1 +
.../src/assets/img/integration-logos.png | Bin 0 -> 651630 bytes
.../src/assets/img/prometheus_logo_grey.svg | 19 +
.../img/slack_landing/slack_ai_suggestion.png | Bin 0 -> 69696 bytes
.../slack_install_permissions.png | Bin 0 -> 222518 bytes
.../slack_invitation_message.png | Bin 0 -> 51844 bytes
.../slack_log_and_plan_message.png | Bin 0 -> 122309 bytes
.../img/slack_landing/slack_main_message.png | Bin 0 -> 311136 bytes
.../img/slack_landing/slack_ok_message.png | Bin 0 -> 94847 bytes
.../slack_oncall_schedule_reminder.png | Bin 0 -> 88511 bytes
.../slack_postmortem_message.png | Bin 0 -> 35938 bytes
grafana-plugin/src/assets/img/telegram.svg | 1 +
.../telegram_channel_instruction_images/1.png | Bin 0 -> 541700 bytes
.../telegram_channel_instruction_images/2.png | Bin 0 -> 612505 bytes
.../telegram_channel_instruction_images/3.png | Bin 0 -> 588353 bytes
.../telegram_channel_instruction_images/4.png | Bin 0 -> 527057 bytes
.../telegram_channel_instruction_images/6.png | Bin 0 -> 1085849 bytes
.../telegram_channel_instruction_images/7.png | Bin 0 -> 851183 bytes
.../telegram_channel_instruction_images/8.png | Bin 0 -> 586944 bytes
.../telegram_channel_instruction_images/9.png | Bin 0 -> 566426 bytes
.../doge-meme-telegram.jpg | Bin 0 -> 57794 bytes
.../src/assets/img/theme/angular.jpg | Bin 0 -> 22527 bytes
.../src/assets/img/theme/bootstrap.jpg | Bin 0 -> 18083 bytes
.../src/assets/img/theme/forrest.png | Bin 0 -> 716609 bytes
.../src/assets/img/theme/profile-cover.jpg | Bin 0 -> 843735 bytes
grafana-plugin/src/assets/img/theme/react.jpg | Bin 0 -> 25566 bytes
.../src/assets/img/theme/sketch.jpg | Bin 0 -> 22085 bytes
.../src/assets/img/theme/team-1-800x800.jpg | Bin 0 -> 166436 bytes
.../src/assets/img/theme/team-2-800x800.jpg | Bin 0 -> 266315 bytes
.../src/assets/img/theme/team-3-800x800.jpg | Bin 0 -> 173325 bytes
.../src/assets/img/theme/team-4-800x800.jpg | Bin 0 -> 124734 bytes
grafana-plugin/src/assets/img/theme/vue.jpg | Bin 0 -> 18469 bytes
grafana-plugin/src/assets/img/users.svg | 1 +
.../AlertTemplatesForm.config.ts | 77 +
.../AlertTemplatesForm.helper.tsx | 16 +
.../AlertTemplatesForm.module.css | 51 +
.../AlertTemplates/AlertTemplatesForm.tsx | 272 +
.../src/components/Avatar/Avatar.module.css | 20 +
.../src/components/Avatar/Avatar.tsx | 25 +
.../CardButton/CardButton.module.css | 29 +
.../src/components/CardButton/CardButton.tsx | 45 +
.../components/Collapse/Collapse.module.css | 28 +
.../src/components/Collapse/Collapse.tsx | 56 +
.../EscalationsFilters.module.css | 8 +
.../EscalationsFilters/EscalationsFilters.tsx | 55 +
.../src/components/GBlock/Block.module.css | 20 +
.../src/components/GBlock/Block.tsx | 33 +
.../src/components/GForm/GForm.module.css | 3 +
grafana-plugin/src/components/GForm/GForm.tsx | 120 +
.../src/components/GForm/GForm.types.ts | 31 +
.../src/components/GList/GList.module.css | 37 +
grafana-plugin/src/components/GList/GList.tsx | 79 +
.../src/components/GTable/GTable.module.css | 11 +
.../src/components/GTable/GTable.tsx | 167 +
.../IntegrationLogo/IntegrationLogo.config.ts | 22 +
.../IntegrationLogo.module.css | 25 +
.../IntegrationLogo/IntegrationLogo.tsx | 48 +
.../IntegrationsFilters.module.css | 17 +
.../IntegrationsFilters.tsx | 55 +
.../MonacoJinja2Editor.module.css | 3 +
.../MonacoJinja2Editor/MonacoJinja2Editor.tsx | 79 +
.../components/MonacoJinja2Editor/jinja2.ts | 388 +
.../PluginLink/PluginLink.module.css | 11 +
.../src/components/PluginLink/PluginLink.tsx | 48 +
.../src/components/Policy/DragHandle.tsx | 17 +
.../Policy/EscalationPolicy.module.css | 42 +
.../components/Policy/EscalationPolicy.tsx | 389 +
.../Policy/NotificationPolicy.module.css | 18 +
.../components/Policy/NotificationPolicy.tsx | 226 +
.../src/components/Policy/Policy.module.css | 24 +
.../src/components/Policy/PolicyNote.tsx | 52 +
.../SchedulesFilters.helpers.ts | 25 +
.../SchedulesFilters.module.css | 4 +
.../SchedulesFilters/SchedulesFilters.tsx | 57 +
.../SchedulesFilters.types.ts | 5 +
.../components/SortableList/SortableList.tsx | 11 +
.../SourceCode/SourceCode.module.css | 28 +
.../src/components/SourceCode/SourceCode.tsx | 43 +
.../src/components/Tag/Tag.module.css | 5 +
grafana-plugin/src/components/Tag/Tag.tsx | 24 +
.../src/components/Text/Text.module.css | 70 +
grafana-plugin/src/components/Text/Text.tsx | 163 +
.../components/TimeRange/TimeRange.module.css | 3 +
.../src/components/TimeRange/TimeRange.tsx | 107 +
.../components/Timeline/Timeline.module.css | 29 +
.../src/components/Timeline/Timeline.tsx | 27 +
.../src/components/Timeline/TimelineItem.tsx | 34 +
.../components/Tutorial/Tutorial.module.css | 57 +
.../src/components/Tutorial/Tutorial.tsx | 91 +
.../src/components/Tutorial/Tutorial.types.ts | 7 +
.../components/Tutorial/icons/bell-icon.svg | 3 +
.../Tutorial/icons/calendar-icon.svg | 3 +
.../components/Tutorial/icons/chat-icon.svg | 3 +
.../Tutorial/icons/escalation-icon.svg | 3 +
.../Tutorial/icons/integration-icon.svg | 3 +
.../UsersFilters/UsersFilters.module.css | 16 +
.../components/UsersFilters/UsersFilters.tsx | 83 +
.../VerticalTabsBar.module.css | 32 +
.../VerticalTabsBar/VerticalTabsBar.tsx | 48 +
.../WithConfirm/WithConfirm.module.css | 3 +
.../components/WithConfirm/WithConfirm.tsx | 58 +
.../AlertReceiveChannelCard.module.css | 16 +
.../AlertReceiveChannelCard.tsx | 92 +
.../AlertRules/AlertRules.helpers.ts | 16 +
.../AlertRules/AlertRules.module.css | 114 +
.../src/containers/AlertRules/AlertRules.tsx | 830 +
.../parts/connectors/SlackConnector.tsx | 115 +
.../parts/connectors/TelegramConnector.tsx | 68 +
.../parts/connectors/index.module.css | 7 +
.../AlertRules/parts/index.module.css | 3 +
.../src/containers/AlertRules/parts/index.tsx | 41 +
.../AlertTemplatesFormContainer.tsx | 78 +
.../ApiTokenSettings/ApiTokenForm.tsx | 68 +
.../ApiTokenSettings.module.css | 13 +
.../ApiTokenSettings/ApiTokenSettings.tsx | 166 +
.../AttachIncidentForm.module.css | 3 +
.../AttachIncidentForm/AttachIncidentForm.tsx | 110 +
.../ChannelFilterForm.module.css | 7 +
.../ChannelFilterForm/ChannelFilterForm.tsx | 122 +
...ateAlertReceiveChannelContainer.module.css | 57 +
.../CreateAlertReceiveChannelContainer.tsx | 109 +
.../DefaultPageLayout.helpers.tsx | 40 +
.../DefaultPageLayout.module.css | 20 +
.../DefaultPageLayout/DefaultPageLayout.tsx | 138 +
.../DefaultPageLayout.types.ts | 5 +
.../EscalationChainCard.module.css | 3 +
.../EscalationChainCard.tsx | 57 +
.../EscalationChainForm.module.css | 3 +
.../EscalationChainForm.tsx | 75 +
.../EscalationChainSteps.module.css | 3 +
.../EscalationChainSteps.tsx | 117 +
.../src/containers/GSelect/GSelect.module.css | 3 +
.../src/containers/GSelect/GSelect.tsx | 158 +
.../GrafanaTeamSelect.module.css | 20 +
.../GrafanaTeamSelect/GrafanaTeamSelect.tsx | 70 +
.../HeartbeatModal/HeartbeatForm.module.css | 29 +
.../HeartbeatModal/HeartbeatForm.tsx | 138 +
.../IncidentMatcher.module.css | 22 +
.../IncidentMatcher/IncidentMatcher.tsx | 112 +
.../IncidentFilters.helpers.ts | 34 +
.../IncidentsFilters/IncidentFilters.types.ts | 13 +
.../IncidentsFilters.module.css | 63 +
.../IncidentsFilters/IncidentsFilters.tsx | 434 +
.../IntegrationSettings.module.css | 32 +
.../IntegrationSettings.tsx | 191 +
.../IntegrationSettings.types.ts | 7 +
.../parts/Autoresolve.module.css | 30 +
.../IntegrationSettings/parts/Autoresolve.tsx | 173 +
.../IntegrationSettings/parts/LiveLogs.tsx | 44 +
.../MaintenanceForm.config.tsx | 75 +
.../MaintenanceForm.helpers.ts | 6 +
.../MaintenanceForm.module.css | 11 +
.../MaintenanceForm/MaintenanceForm.tsx | 79 +
.../MobileAppVerification.module.css | 8 +
.../MobileAppVerification.tsx | 154 +
.../OrganizationLogFilters.module.css | 19 +
.../OrganizationLogFilters.tsx | 98 +
.../OutgoingWebhookForm.config.ts | 51 +
.../OutgoingWebhookForm.module.css | 11 +
.../OutgoingWebhookForm.tsx | 73 +
.../PersonalNotificationSettings.helpers.ts | 3 +
.../PersonalNotificationSettings.module.css | 17 +
.../PersonalNotificationSettings.tsx | 153 +
.../img/default-step.png | Bin 0 -> 9731 bytes
.../PluginConfigPage.module.css | 7 +
.../PluginConfigPage/PluginConfigPage.tsx | 354 +
.../RemoteSelect/RemoteSelect.module.css | 3 +
.../containers/RemoteSelect/RemoteSelect.tsx | 123 +
.../ScheduleForm/ScheduleForm.config.ts | 141 +
.../ScheduleForm/ScheduleForm.helpers.ts | 18 +
.../ScheduleForm/ScheduleForm.module.css | 11 +
.../containers/ScheduleForm/ScheduleForm.tsx | 99 +
.../ScheduleIcalLink.module.css | 3 +
.../ScheduleIcalLink/ScheduleIcalLink.tsx | 108 +
.../SlackIntegrationButton.tsx | 98 +
.../TelegramIntegrationButton.module.css | 21 +
.../TelegramIntegrationButton.tsx | 183 +
.../TemplatePreview.module.css | 15 +
.../TemplatePreview/TemplatePreview.tsx | 58 +
.../UserSettings/UserSettings.module.css | 7 +
.../containers/UserSettings/UserSettings.tsx | 96 +
.../UserSettings/UserSettings.types.ts | 16 +
.../parts/connectors/ICalConnector.tsx | 119 +
.../parts/connectors/MobileAppConnector.tsx | 44 +
.../parts/connectors/PhoneConnector.tsx | 62 +
.../parts/connectors/SlackConnector.tsx | 66 +
.../parts/connectors/TelegramConnector.tsx | 63 +
.../parts/connectors/index.module.css | 32 +
.../UserSettings/parts/connectors/index.tsx | 27 +
.../UserSettings/parts/index.module.css | 30 +
.../containers/UserSettings/parts/index.tsx | 137 +
.../parts/tabs/NotificationSettingsTab.tsx | 24 +
.../PhoneVerification.module.css | 8 +
.../PhoneVerification/PhoneVerification.tsx | 189 +
.../parts/tabs/SlackTab/SlackTab.module.css | 4 +
.../parts/tabs/SlackTab/SlackTab.tsx | 37 +
.../tabs/TelegramInfo/TelegramInfo.module.css | 8 +
.../parts/tabs/TelegramInfo/TelegramInfo.tsx | 84 +
.../tabs/UserInfoTab/UserInfoTab.module.css | 7 +
.../parts/tabs/UserInfoTab/UserInfoTab.tsx | 53 +
.../containers/UserSummary/UserSummary.tsx | 31 +
.../UserTooltip/UserTooltip.module.css | 3 +
.../containers/UserTooltip/UserTooltip.tsx | 35 +
.../WithPermissionControl.module.css | 3 +
.../WithPermissionControl.tsx | 56 +
grafana-plugin/src/declare/index.d.ts | 1 +
grafana-plugin/src/icons/grafana-icon.svg | 10 +
grafana-plugin/src/icons/heart-green.svg | 3 +
grafana-plugin/src/icons/heart-red.svg | 3 +
grafana-plugin/src/icons/index.tsx | 192 +
grafana-plugin/src/img/logo.svg | 9 +
grafana-plugin/src/img/screenshot.png | Bin 0 -> 469491 bytes
.../img/slack_workspace_choose_attention.png | Bin 0 -> 43568 bytes
.../src/img/telegram_discussion.png | Bin 0 -> 338148 bytes
grafana-plugin/src/index.css | 56 +
grafana-plugin/src/index.d.ts | 13 +
grafana-plugin/src/interceptors/index.ts | 19 +
grafana-plugin/src/models/action.ts | 12 +
.../src/models/alert_receive_channel.ts | 34 +
.../alert_receive_channel.helpers.ts | 11 +
.../alert_receive_channel.ts | 358 +
.../alert_receive_channel.types.ts | 46 +
.../alert_receive_channel_filters.ts | 48 +
grafana-plugin/src/models/alert_templates.ts | 19 +
.../src/models/alertgroup/alertgroup.ts | 433 +
.../src/models/alertgroup/alertgroup.types.ts | 90 +
grafana-plugin/src/models/api_key.ts | 5 +
.../src/models/api_token/api_token.ts | 61 +
.../src/models/api_token/api_token.types.ts | 6 +
grafana-plugin/src/models/base_store.ts | 76 +
grafana-plugin/src/models/card.ts | 14 +
grafana-plugin/src/models/channel.ts | 8 +
grafana-plugin/src/models/channel_filter.ts | 14 +
.../channel_filter/channel_filter.types.ts | 22 +
grafana-plugin/src/models/curler/curler.ts | 111 +
.../src/models/curler/curler.types.ts | 26 +
.../src/models/current_subscription.ts | 59 +
.../current_subscription.ts | 23 +
.../current_subscription.types.ts | 86 +
.../escalation_chain/escalation_chain.ts | 92 +
.../escalation_chain.types.ts | 16 +
.../src/models/escalation_policy.ts | 40 +
.../escalation_policy.helpers.ts | 7 +
.../escalation_policy/escalation_policy.ts | 138 +
.../escalation_policy.types.ts | 36 +
.../models/global_setting/global_setting.ts | 63 +
.../global_setting/global_setting.types.ts | 9 +
.../src/models/grafana_team/grafana_team.ts | 48 +
.../models/grafana_team/grafana_team.types.ts | 6 +
.../src/models/heartbeat/heartbeat.ts | 55 +
.../src/models/heartbeat/heartbeat.types.ts | 11 +
.../src/models/integrations_list.ts | 6 +
grafana-plugin/src/models/leader.ts | 10 +
.../src/models/maintenance/helpers.ts | 10 +
.../src/models/maintenance/maintenance.ts | 55 +
.../models/maintenance/maintenance.types.ts | 19 +
.../src/models/notification_policy.ts | 19 +
grafana-plugin/src/models/notify_by.ts | 4 +
.../organization_log/organization_log.ts | 60 +
.../organization_log.types.ts | 10 +
.../outgoing_webhook/outgoing_webhook.ts | 72 +
.../outgoing_webhook.types.ts | 11 +
.../models/resolution_note/resolution_note.ts | 27 +
.../resolution_note/resolution_note.types.ts | 24 +
grafana-plugin/src/models/schedule.ts | 14 +
.../src/models/schedule/schedule.ts | 110 +
.../src/models/schedule/schedule.types.ts | 44 +
grafana-plugin/src/models/slack/slack.ts | 74 +
.../src/models/slack/slack.types.ts | 5 +
.../slack_channel/slack_channel.config.ts | 1 +
.../slack_channel/slack_channel.helpers.ts | 13 +
.../src/models/slack_channel/slack_channel.ts | 72 +
.../slack_channel/slack_channel.types.ts | 5 +
grafana-plugin/src/models/team.ts | 4 +
grafana-plugin/src/models/team/team.ts | 127 +
grafana-plugin/src/models/team/team.types.ts | 73 +
.../telegram_channel.helpers.ts | 1 +
.../telegram_channel/telegram_channel.ts | 122 +
.../telegram_channel.types.ts | 8 +
grafana-plugin/src/models/user.ts | 34 +
grafana-plugin/src/models/user/user.config.ts | 10 +
.../src/models/user/user.helpers.tsx | 48 +
grafana-plugin/src/models/user/user.ts | 355 +
grafana-plugin/src/models/user/user.types.ts | 53 +
.../src/models/user_group/user_group.ts | 52 +
.../src/models/user_group/user_group.types.ts | 5 +
grafana-plugin/src/models/wait_delay.ts | 4 +
grafana-plugin/src/models/webinar/webinar.ts | 67 +
.../src/models/webinar/webinar.types.ts | 13 +
grafana-plugin/src/module.ts | 20 +
grafana-plugin/src/network/index.ts | 47 +
.../src/pages/chat-ops/ChatOps.module.css | 14 +
grafana-plugin/src/pages/chat-ops/ChatOps.tsx | 51 +
.../src/pages/chat-ops/ChatOps.types.ts | 4 +
.../src/pages/chat-ops/parts/index.tsx | 63 +
.../SlackSettings/SlackSettings.module.css | 17 +
.../tabs/SlackSettings/SlackSettings.tsx | 167 +
.../TelegramSettings.module.css | 8 +
.../TelegramSettings/TelegramSettings.tsx | 169 +
.../EscalationChains.module.css | 53 +
.../escalation-chains/EscalationChains.tsx | 391 +
.../src/pages/incident/Incident.helpers.tsx | 243 +
.../src/pages/incident/Incident.module.css | 105 +
.../src/pages/incident/Incident.tsx | 565 +
.../src/pages/incidents/Incidents.module.css | 36 +
.../src/pages/incidents/Incidents.tsx | 596 +
.../pages/incidents/parts/SilenceDropdown.tsx | 53 +
grafana-plugin/src/pages/index.ts | 126 +
.../integrations/Integrations.module.css | 40 +
.../src/pages/integrations/Integrations.tsx | 313 +
.../pages/livesettings/LiveSettings.config.ts | 2 +
.../livesettings/LiveSettings.helpers.ts | 17 +
.../livesettings/LiveSettings.module.css | 20 +
.../pages/livesettings/LiveSettingsPage.tsx | 261 +
.../pages/maintenance/Maintenance.module.css | 12 +
.../src/pages/maintenance/Maintenance.tsx | 227 +
.../migration-tool/MigrationTool.module.css | 9 +
.../pages/migration-tool/MigrationTool.tsx | 368 +
.../pages/migration-tool/img/api-tokens.png | Bin 0 -> 46193 bytes
.../OrganizationLog.module.css | 27 +
.../organization-logs/OrganizationLog.tsx | 199 +
.../OutgoingWebhooks.module.css | 8 +
.../outgoing_webhooks/OutgoingWebhooks.tsx | 167 +
.../src/pages/schedules/Schedules.helpers.ts | 65 +
.../src/pages/schedules/Schedules.module.css | 72 +
.../src/pages/schedules/Schedules.tsx | 517 +
.../pages/settings/SettingsPage.module.css | 11 +
.../src/pages/settings/SettingsPage.tsx | 100 +
grafana-plugin/src/pages/test/Test.module.css | 7 +
grafana-plugin/src/pages/test/Test.tsx | 45 +
.../src/pages/users/Users.helpers.ts | 27 +
.../src/pages/users/Users.module.css | 52 +
grafana-plugin/src/pages/users/Users.tsx | 335 +
grafana-plugin/src/plugin.json | 200 +
.../src/services/experimentManager.ts | 16 +
.../src/services/googleTagManager.ts | 5 +
grafana-plugin/src/services/mixpanel.ts | 28 +
grafana-plugin/src/services/urlManager.ts | 24 +
grafana-plugin/src/state/features.ts | 6 +
grafana-plugin/src/state/helpers.ts | 61 +
grafana-plugin/src/state/incidents.ts | 3 +
grafana-plugin/src/state/index.ts | 5 +
grafana-plugin/src/state/plugin.ts | 42 +
grafana-plugin/src/state/rootBaseStore.ts | 250 +
grafana-plugin/src/state/types.ts | 10 +
grafana-plugin/src/state/useStore.ts | 11 +
grafana-plugin/src/state/userAction.ts | 23 +
grafana-plugin/src/state/withStore.tsx | 11 +
grafana-plugin/src/types.ts | 15 +
grafana-plugin/src/utils/consts.ts | 4 +
grafana-plugin/src/utils/datetime.ts | 67 +
grafana-plugin/src/utils/hooks.ts | 104 +
grafana-plugin/src/utils/index.ts | 138 +
grafana-plugin/src/utils/loadCss.ts | 14 +
grafana-plugin/src/utils/localStorage.ts | 60 +
grafana-plugin/src/utils/sanitize.ts | 9 +
grafana-plugin/src/utils/url.ts | 11 +
grafana-plugin/src/vars.css | 44 +
.../eslint-rules/no-relative-import-paths.js | 56 +
.../tools/plop/generators/appendReadmeFile.js | 16 +
.../plop/generators/createComponentFiles.js | 29 +
.../plop/generators/createContainerFiles.js | 29 +
.../tools/plop/generators/createModelFiles.js | 22 +
.../tools/plop/helpers/configNeeded.js | 10 +
.../tools/plop/prompts/componentPrompts.js | 12 +
.../tools/plop/prompts/containerPrompts.js | 12 +
.../tools/plop/prompts/modelPrompts.js | 8 +
.../tools/plop/prompts/readmePrompts.js | 17 +
.../tools/plop/templates/BuildInfo.md.hbs | 1 +
.../Component/ClassComponent.tsx.hbs | 29 +
.../Component/Component.module.css.hbs | 3 +
.../Component/FunctionalComponent.tsx.hbs | 20 +
.../Container/ClassComponent.tsx.hbs | 32 +
.../Container/Component.module.css.hbs | 3 +
.../Container/FunctionalComponent.tsx.hbs | 25 +
.../plop/templates/Model/BaseModel.ts.hbs | 65 +
.../templates/Model/BaseModel.types.ts.hbs | 3 +
grafana-plugin/tsconfig.json | 13 +
grafana-plugin/webpack.config.js | 77 +
grafana-plugin/yarn.lock | 15514 ++++++++++++++++
screenshot.png | Bin 0 -> 469491 bytes
tools/image-tag.sh | 16 +
1217 files changed, 112652 insertions(+)
create mode 100644 .dockerignore
create mode 100644 .drone.yml
create mode 100644 .env.example
create mode 100644 .github/issue_and_pr_commands.json
create mode 100644 .github/workflows/backend-ci.yml
create mode 100644 .github/workflows/frontend-ci.yml
create mode 100644 .github/workflows/issue_commands.yml
create mode 100644 .github/workflows/publish_docs.yml
create mode 100644 .gitignore
create mode 100644 .pre-commit-config.yaml
create mode 100644 CHANGELOG.md
create mode 100644 DEVELOPER.md
create mode 100644 LICENSE
create mode 100644 LICENSING.md
create mode 100644 README.md
create mode 100644 SECURITY.md
create mode 100644 developer-docker-compose.yml
create mode 100644 docs/Makefile
create mode 100644 docs/README.md
create mode 100644 docs/sources/_index.md
create mode 100644 docs/sources/calendar-schedules/_index.md
create mode 100644 docs/sources/calendar-schedules/about-calendars.md
create mode 100644 docs/sources/calendar-schedules/create-calendar.md
create mode 100644 docs/sources/chat-options/_index.md
create mode 100644 docs/sources/chat-options/configure-slack.md
create mode 100644 docs/sources/chat-options/configure-telegram.md
create mode 100644 docs/sources/configure-notifications.md
create mode 100644 docs/sources/configure-user-settings.md
create mode 100644 docs/sources/escalation-policies/_index.md
create mode 100644 docs/sources/escalation-policies/about-escalation-policies.md
create mode 100644 docs/sources/escalation-policies/configure-escalation-policies.md
create mode 100644 docs/sources/integrations/_index.md
create mode 100644 docs/sources/integrations/add-alertmanager.md
create mode 100644 docs/sources/integrations/add-grafana-alerting.md
create mode 100644 docs/sources/integrations/add-integration.md
create mode 100644 docs/sources/integrations/add-zabbix.md
create mode 100644 docs/sources/integrations/webhooks/_index.md
create mode 100644 docs/sources/integrations/webhooks/add-webhook-integration.md
create mode 100644 docs/sources/integrations/webhooks/configure-outgoing-webhooks.md
create mode 100644 docs/sources/integrations/webhooks/create-custom-templates.md
create mode 100644 docs/sources/manage-alert-groups.md
create mode 100644 docs/sources/oncall-api-reference/_index.md
create mode 100644 docs/sources/oncall-api-reference/alertgroups.md
create mode 100644 docs/sources/oncall-api-reference/alerts.md
create mode 100644 docs/sources/oncall-api-reference/escalation_chains.md
create mode 100644 docs/sources/oncall-api-reference/escalation_policies.md
create mode 100644 docs/sources/oncall-api-reference/integrations.md
create mode 100644 docs/sources/oncall-api-reference/on_call_shifts.md
create mode 100644 docs/sources/oncall-api-reference/outgoing_webhooks.md
create mode 100644 docs/sources/oncall-api-reference/personal_notification_rules.md
create mode 100644 docs/sources/oncall-api-reference/postmortem_messages.md
create mode 100644 docs/sources/oncall-api-reference/postmortems.md
create mode 100644 docs/sources/oncall-api-reference/routes.md
create mode 100644 docs/sources/oncall-api-reference/schedules.md
create mode 100644 docs/sources/oncall-api-reference/slack_channels.md
create mode 100644 docs/sources/oncall-api-reference/user_groups.md
create mode 100644 docs/sources/oncall-api-reference/users.md
create mode 100644 engine/Dockerfile
create mode 100644 engine/Dockerfile.all-in-one
create mode 100644 engine/apps/__init__.py
create mode 100644 engine/apps/alerts/__init__.py
create mode 100644 engine/apps/alerts/admin.py
create mode 100644 engine/apps/alerts/constants.py
create mode 100644 engine/apps/alerts/escalation_snapshot/__init__.py
create mode 100644 engine/apps/alerts/escalation_snapshot/escalation_snapshot_mixin.py
create mode 100644 engine/apps/alerts/escalation_snapshot/serializers/__init__.py
create mode 100644 engine/apps/alerts/escalation_snapshot/serializers/channel_filter_snapshot.py
create mode 100644 engine/apps/alerts/escalation_snapshot/serializers/escalation_chain_snapshot.py
create mode 100644 engine/apps/alerts/escalation_snapshot/serializers/escalation_policy_snapshot.py
create mode 100644 engine/apps/alerts/escalation_snapshot/serializers/escalation_snapshot.py
create mode 100644 engine/apps/alerts/escalation_snapshot/snapshot_classes/__init__.py
create mode 100644 engine/apps/alerts/escalation_snapshot/snapshot_classes/channel_filter_snapshot.py
create mode 100644 engine/apps/alerts/escalation_snapshot/snapshot_classes/escalation_chain_snapshot.py
create mode 100644 engine/apps/alerts/escalation_snapshot/snapshot_classes/escalation_policy_snapshot.py
create mode 100644 engine/apps/alerts/escalation_snapshot/snapshot_classes/escalation_snapshot.py
create mode 100644 engine/apps/alerts/escalation_snapshot/utils.py
create mode 100644 engine/apps/alerts/grafana_alerting_sync_manager/__init__.py
create mode 100644 engine/apps/alerts/grafana_alerting_sync_manager/grafana_alerting_sync.py
create mode 100644 engine/apps/alerts/incident_appearance/__init__.py
create mode 100644 engine/apps/alerts/incident_appearance/renderers/__init__.py
create mode 100644 engine/apps/alerts/incident_appearance/renderers/base_renderer.py
create mode 100644 engine/apps/alerts/incident_appearance/renderers/constants.py
create mode 100644 engine/apps/alerts/incident_appearance/renderers/email_renderer.py
create mode 100644 engine/apps/alerts/incident_appearance/renderers/phone_call_renderer.py
create mode 100644 engine/apps/alerts/incident_appearance/renderers/slack_renderer.py
create mode 100644 engine/apps/alerts/incident_appearance/renderers/sms_renderer.py
create mode 100644 engine/apps/alerts/incident_appearance/renderers/telegram_renderer.py
create mode 100644 engine/apps/alerts/incident_appearance/renderers/web_renderer.py
create mode 100644 engine/apps/alerts/incident_appearance/templaters/__init__.py
create mode 100644 engine/apps/alerts/incident_appearance/templaters/alert_templater.py
create mode 100644 engine/apps/alerts/incident_appearance/templaters/email_templater.py
create mode 100644 engine/apps/alerts/incident_appearance/templaters/phone_call_templater.py
create mode 100644 engine/apps/alerts/incident_appearance/templaters/slack_templater.py
create mode 100644 engine/apps/alerts/incident_appearance/templaters/sms_templater.py
create mode 100644 engine/apps/alerts/incident_appearance/templaters/telegram_templater.py
create mode 100644 engine/apps/alerts/incident_appearance/templaters/web_templater.py
create mode 100644 engine/apps/alerts/incident_log_builder/__init__.py
create mode 100644 engine/apps/alerts/incident_log_builder/incident_log_builder.py
create mode 100644 engine/apps/alerts/integration_options_mixin.py
create mode 100644 engine/apps/alerts/migrations/0001_squashed_initial.py
create mode 100644 engine/apps/alerts/migrations/0002_squashed_initial.py
create mode 100644 engine/apps/alerts/migrations/0003_squashed_create_demo_token_instances.py
create mode 100644 engine/apps/alerts/migrations/__init__.py
create mode 100644 engine/apps/alerts/models/__init__.py
create mode 100644 engine/apps/alerts/models/alert.py
create mode 100644 engine/apps/alerts/models/alert_group.py
create mode 100644 engine/apps/alerts/models/alert_group_counter.py
create mode 100644 engine/apps/alerts/models/alert_group_log_record.py
create mode 100644 engine/apps/alerts/models/alert_manager_models.py
create mode 100644 engine/apps/alerts/models/alert_receive_channel.py
create mode 100644 engine/apps/alerts/models/channel_filter.py
create mode 100644 engine/apps/alerts/models/custom_button.py
create mode 100644 engine/apps/alerts/models/escalation_chain.py
create mode 100644 engine/apps/alerts/models/escalation_policy.py
create mode 100644 engine/apps/alerts/models/grafana_alerting_contact_point.py
create mode 100644 engine/apps/alerts/models/invitation.py
create mode 100644 engine/apps/alerts/models/maintainable_object.py
create mode 100644 engine/apps/alerts/models/resolution_note.py
create mode 100644 engine/apps/alerts/models/user_has_notification.py
create mode 100644 engine/apps/alerts/representative.py
create mode 100644 engine/apps/alerts/signals.py
create mode 100644 engine/apps/alerts/tasks/__init__.py
create mode 100644 engine/apps/alerts/tasks/acknowledge_reminder.py
create mode 100644 engine/apps/alerts/tasks/cache_alert_group_for_web.py
create mode 100644 engine/apps/alerts/tasks/calculcate_escalation_finish_time.py
create mode 100644 engine/apps/alerts/tasks/call_ack_url.py
create mode 100644 engine/apps/alerts/tasks/check_escalation_finished.py
create mode 100644 engine/apps/alerts/tasks/compare_escalations.py
create mode 100644 engine/apps/alerts/tasks/create_contact_points_for_datasource.py
create mode 100644 engine/apps/alerts/tasks/custom_button_result.py
create mode 100644 engine/apps/alerts/tasks/delete_alert_group.py
create mode 100644 engine/apps/alerts/tasks/distribute_alert.py
create mode 100644 engine/apps/alerts/tasks/escalate_alert_group.py
create mode 100644 engine/apps/alerts/tasks/invalidate_web_cache_for_alert_group.py
create mode 100644 engine/apps/alerts/tasks/invite_user_to_join_incident.py
create mode 100644 engine/apps/alerts/tasks/maintenance.py
create mode 100644 engine/apps/alerts/tasks/notify_all.py
create mode 100644 engine/apps/alerts/tasks/notify_group.py
create mode 100644 engine/apps/alerts/tasks/notify_ical_schedule_shift.py
create mode 100644 engine/apps/alerts/tasks/notify_user.py
create mode 100644 engine/apps/alerts/tasks/resolve_alert_group_by_source_if_needed.py
create mode 100644 engine/apps/alerts/tasks/resolve_alert_group_if_needed.py
create mode 100644 engine/apps/alerts/tasks/resolve_by_last_step.py
create mode 100644 engine/apps/alerts/tasks/send_alert_group_signal.py
create mode 100644 engine/apps/alerts/tasks/send_update_log_report_signal.py
create mode 100644 engine/apps/alerts/tasks/send_update_postmortem_signal.py
create mode 100644 engine/apps/alerts/tasks/send_update_resolution_note_signal.py
create mode 100644 engine/apps/alerts/tasks/sync_grafana_alerting_contact_points.py
create mode 100644 engine/apps/alerts/tasks/task_logger.py
create mode 100644 engine/apps/alerts/tasks/unsilence.py
create mode 100644 engine/apps/alerts/tasks/wipe.py
create mode 100644 engine/apps/alerts/terraform_renderer/__init__.py
create mode 100644 engine/apps/alerts/terraform_renderer/terraform_file_renderer.py
create mode 100644 engine/apps/alerts/terraform_renderer/terraform_state_renderer.py
create mode 100644 engine/apps/alerts/tests/__init__.py
create mode 100644 engine/apps/alerts/tests/conftest.py
create mode 100644 engine/apps/alerts/tests/factories.py
create mode 100644 engine/apps/alerts/tests/test_alert_group.py
create mode 100644 engine/apps/alerts/tests/test_alert_group_renderer.py
create mode 100644 engine/apps/alerts/tests/test_alert_manager.py
create mode 100644 engine/apps/alerts/tests/test_alert_receiver_channel.py
create mode 100644 engine/apps/alerts/tests/test_channel_filter.py
create mode 100644 engine/apps/alerts/tests/test_check_escalation_finished_task.py
create mode 100644 engine/apps/alerts/tests/test_custom_button.py
create mode 100644 engine/apps/alerts/tests/test_default_templates.py
create mode 100644 engine/apps/alerts/tests/test_escalation_chain.py
create mode 100644 engine/apps/alerts/tests/test_escalation_policy_snapshot.py
create mode 100644 engine/apps/alerts/tests/test_escalation_snapshot.py
create mode 100644 engine/apps/alerts/tests/test_incident_log_builder.py
create mode 100644 engine/apps/alerts/tests/test_maintenance.py
create mode 100644 engine/apps/alerts/tests/test_notify_ical_schedule_shift.py
create mode 100644 engine/apps/alerts/tests/test_notify_user.py
create mode 100644 engine/apps/alerts/tests/test_representative.py
create mode 100644 engine/apps/alerts/tests/test_silence.py
create mode 100644 engine/apps/alerts/tests/test_terraform_renderer.py
create mode 100644 engine/apps/alerts/tests/test_utils.py
create mode 100644 engine/apps/alerts/tests/test_wipe.py
create mode 100644 engine/apps/alerts/utils.py
create mode 100644 engine/apps/api/__init__.py
create mode 100644 engine/apps/api/permissions/__init__.py
create mode 100644 engine/apps/api/permissions/actions.py
create mode 100644 engine/apps/api/permissions/constants.py
create mode 100644 engine/apps/api/permissions/methods.py
create mode 100644 engine/apps/api/permissions/owner.py
create mode 100644 engine/apps/api/permissions/roles.py
create mode 100644 engine/apps/api/response_renderers.py
create mode 100644 engine/apps/api/serializers/__init__.py
create mode 100644 engine/apps/api/serializers/alert.py
create mode 100644 engine/apps/api/serializers/alert_group.py
create mode 100644 engine/apps/api/serializers/alert_receive_channel.py
create mode 100644 engine/apps/api/serializers/channel_filter.py
create mode 100644 engine/apps/api/serializers/custom_button.py
create mode 100644 engine/apps/api/serializers/custom_serializers.py
create mode 100644 engine/apps/api/serializers/escalation_chain.py
create mode 100644 engine/apps/api/serializers/escalation_policy.py
create mode 100644 engine/apps/api/serializers/integration_heartbeat.py
create mode 100644 engine/apps/api/serializers/live_setting.py
create mode 100644 engine/apps/api/serializers/organization.py
create mode 100644 engine/apps/api/serializers/organization_log_record.py
create mode 100644 engine/apps/api/serializers/organization_slack_settings.py
create mode 100644 engine/apps/api/serializers/public_api_token.py
create mode 100644 engine/apps/api/serializers/resolution_note.py
create mode 100644 engine/apps/api/serializers/schedule_base.py
create mode 100644 engine/apps/api/serializers/schedule_calendar.py
create mode 100644 engine/apps/api/serializers/schedule_ical.py
create mode 100644 engine/apps/api/serializers/schedule_polymorphic.py
create mode 100644 engine/apps/api/serializers/schedule_reminder.py
create mode 100644 engine/apps/api/serializers/slack_channel.py
create mode 100644 engine/apps/api/serializers/slack_user_identity.py
create mode 100644 engine/apps/api/serializers/team.py
create mode 100644 engine/apps/api/serializers/telegram.py
create mode 100644 engine/apps/api/serializers/user.py
create mode 100644 engine/apps/api/serializers/user_group.py
create mode 100644 engine/apps/api/serializers/user_notification_policy.py
create mode 100644 engine/apps/api/tasks.py
create mode 100644 engine/apps/api/tests/__init__.py
create mode 100644 engine/apps/api/tests/conftest.py
create mode 100644 engine/apps/api/tests/test_alert_group.py
create mode 100644 engine/apps/api/tests/test_alert_receive_channel.py
create mode 100644 engine/apps/api/tests/test_alert_receive_channel_template.py
create mode 100644 engine/apps/api/tests/test_channel_filter.py
create mode 100644 engine/apps/api/tests/test_custom_button.py
create mode 100644 engine/apps/api/tests/test_escalation_chain.py
create mode 100644 engine/apps/api/tests/test_escalation_policy.py
create mode 100644 engine/apps/api/tests/test_features.py
create mode 100644 engine/apps/api/tests/test_gitops.py
create mode 100644 engine/apps/api/tests/test_integration_heartbeat.py
create mode 100644 engine/apps/api/tests/test_live_settings.py
create mode 100644 engine/apps/api/tests/test_maintenance.py
create mode 100644 engine/apps/api/tests/test_organization.py
create mode 100644 engine/apps/api/tests/test_organization_log_record.py
create mode 100644 engine/apps/api/tests/test_postmortem_messages.py
create mode 100644 engine/apps/api/tests/test_preview_template_options.py
create mode 100644 engine/apps/api/tests/test_route_regex_debugger.py
create mode 100644 engine/apps/api/tests/test_schedule_export.py
create mode 100644 engine/apps/api/tests/test_schedules.py
create mode 100644 engine/apps/api/tests/test_set_general_log_channel.py
create mode 100644 engine/apps/api/tests/test_slack_channels.py
create mode 100644 engine/apps/api/tests/test_slack_team_settings.py
create mode 100644 engine/apps/api/tests/test_subscription.py
create mode 100644 engine/apps/api/tests/test_team.py
create mode 100644 engine/apps/api/tests/test_telegram_channel.py
create mode 100644 engine/apps/api/tests/test_terraform_renderer.py
create mode 100644 engine/apps/api/tests/test_user.py
create mode 100644 engine/apps/api/tests/test_user_groups.py
create mode 100644 engine/apps/api/tests/test_user_notification_policy.py
create mode 100644 engine/apps/api/tests/test_user_schedule_export.py
create mode 100644 engine/apps/api/throttlers/__init__.py
create mode 100644 engine/apps/api/throttlers/demo_alert_throttler.py
create mode 100644 engine/apps/api/urls.py
create mode 100644 engine/apps/api/views/__init__.py
create mode 100644 engine/apps/api/views/alert_group.py
create mode 100644 engine/apps/api/views/alert_receive_channel.py
create mode 100644 engine/apps/api/views/alert_receive_channel_template.py
create mode 100644 engine/apps/api/views/apns_device.py
create mode 100644 engine/apps/api/views/auth.py
create mode 100644 engine/apps/api/views/channel_filter.py
create mode 100644 engine/apps/api/views/custom_button.py
create mode 100644 engine/apps/api/views/escalation_chain.py
create mode 100644 engine/apps/api/views/escalation_policy.py
create mode 100644 engine/apps/api/views/features.py
create mode 100644 engine/apps/api/views/gitops.py
create mode 100644 engine/apps/api/views/integration_heartbeat.py
create mode 100644 engine/apps/api/views/live_setting.py
create mode 100644 engine/apps/api/views/maintenance.py
create mode 100644 engine/apps/api/views/organization.py
create mode 100644 engine/apps/api/views/organization_log_record.py
create mode 100644 engine/apps/api/views/preview_template_options.py
create mode 100644 engine/apps/api/views/public_api_tokens.py
create mode 100644 engine/apps/api/views/resolution_note.py
create mode 100644 engine/apps/api/views/route_regex_debugger.py
create mode 100644 engine/apps/api/views/schedule.py
create mode 100644 engine/apps/api/views/slack_channel.py
create mode 100644 engine/apps/api/views/slack_team_settings.py
create mode 100644 engine/apps/api/views/subscription.py
create mode 100644 engine/apps/api/views/team.py
create mode 100644 engine/apps/api/views/telegram_channels.py
create mode 100644 engine/apps/api/views/user.py
create mode 100644 engine/apps/api/views/user_group.py
create mode 100644 engine/apps/api/views/user_notification_policy.py
create mode 100644 engine/apps/api_for_grafana_incident/__init__.py
create mode 100644 engine/apps/api_for_grafana_incident/apps.py
create mode 100644 engine/apps/api_for_grafana_incident/serializers.py
create mode 100644 engine/apps/api_for_grafana_incident/urls.py
create mode 100644 engine/apps/api_for_grafana_incident/views.py
create mode 100644 engine/apps/auth_token/__init__.py
create mode 100644 engine/apps/auth_token/auth.py
create mode 100644 engine/apps/auth_token/constants.py
create mode 100644 engine/apps/auth_token/crypto.py
create mode 100644 engine/apps/auth_token/exceptions.py
create mode 100644 engine/apps/auth_token/migrations/0001_squashed_initial.py
create mode 100644 engine/apps/auth_token/migrations/0002_squashed_initial.py
create mode 100644 engine/apps/auth_token/migrations/0003_squashed_create_demo_token_instances.py
create mode 100644 engine/apps/auth_token/migrations/__init__.py
create mode 100644 engine/apps/auth_token/models/__init__.py
create mode 100644 engine/apps/auth_token/models/api_auth_token.py
create mode 100644 engine/apps/auth_token/models/base_auth_token.py
create mode 100644 engine/apps/auth_token/models/mobile_app_auth_token.py
create mode 100644 engine/apps/auth_token/models/mobile_app_verification_token.py
create mode 100644 engine/apps/auth_token/models/plugin_auth_token.py
create mode 100644 engine/apps/auth_token/models/schedule_export_auth_token.py
create mode 100644 engine/apps/auth_token/models/slack_auth_token.py
create mode 100644 engine/apps/auth_token/models/user_schedule_export_auth_token.py
create mode 100644 engine/apps/auth_token/tests/__init__.py
create mode 100644 engine/apps/auth_token/tests/test_crypto.py
create mode 100644 engine/apps/base/__init__.py
create mode 100644 engine/apps/base/admin.py
create mode 100644 engine/apps/base/constants.py
create mode 100644 engine/apps/base/messaging.py
create mode 100644 engine/apps/base/migrations/0001_squashed_initial.py
create mode 100644 engine/apps/base/migrations/0002_squashed_initial.py
create mode 100644 engine/apps/base/migrations/0003_squashed_create_demo_token_instances.py
create mode 100644 engine/apps/base/migrations/__init__.py
create mode 100644 engine/apps/base/models/__init__.py
create mode 100644 engine/apps/base/models/dynamic_setting.py
create mode 100644 engine/apps/base/models/failed_to_invoke_celery_task.py
create mode 100644 engine/apps/base/models/live_setting.py
create mode 100644 engine/apps/base/models/organization_log_record.py
create mode 100644 engine/apps/base/models/user_notification_policy.py
create mode 100644 engine/apps/base/models/user_notification_policy_log_record.py
create mode 100644 engine/apps/base/tasks.py
create mode 100644 engine/apps/base/tests/__init__.py
create mode 100644 engine/apps/base/tests/factories.py
create mode 100644 engine/apps/base/tests/messaging_backend.py
create mode 100644 engine/apps/base/tests/test_live_settings.py
create mode 100644 engine/apps/base/tests/test_messaging.py
create mode 100644 engine/apps/base/tests/test_organization_log_record.py
create mode 100644 engine/apps/base/tests/test_user_notification_policy.py
create mode 100644 engine/apps/base/tests/test_user_notification_policy_log_record.py
create mode 100644 engine/apps/base/utils.py
create mode 100644 engine/apps/grafana_plugin/__init__.py
create mode 100644 engine/apps/grafana_plugin/helpers/__init__.py
create mode 100644 engine/apps/grafana_plugin/helpers/client.py
create mode 100644 engine/apps/grafana_plugin/helpers/gcom.py
create mode 100644 engine/apps/grafana_plugin/permissions.py
create mode 100644 engine/apps/grafana_plugin/tasks/__init__.py
create mode 100644 engine/apps/grafana_plugin/tasks/sync.py
create mode 100644 engine/apps/grafana_plugin/tests/test_sync.py
create mode 100644 engine/apps/grafana_plugin/urls.py
create mode 100644 engine/apps/grafana_plugin/views/__init__.py
create mode 100644 engine/apps/grafana_plugin/views/install.py
create mode 100644 engine/apps/grafana_plugin/views/self_hosted_install.py
create mode 100644 engine/apps/grafana_plugin/views/status.py
create mode 100644 engine/apps/grafana_plugin/views/sync.py
create mode 100644 engine/apps/grafana_plugin/views/sync_organization.py
create mode 100644 engine/apps/grafana_plugin_management/__init__.py
create mode 100644 engine/apps/grafana_plugin_management/urls.py
create mode 100644 engine/apps/grafana_plugin_management/views/__init__.py
create mode 100644 engine/apps/grafana_plugin_management/views/plugin_installations.py
create mode 100644 engine/apps/heartbeat/__init__.py
create mode 100644 engine/apps/heartbeat/admin.py
create mode 100644 engine/apps/heartbeat/migrations/0001_squashed_initial.py
create mode 100644 engine/apps/heartbeat/migrations/__init__.py
create mode 100644 engine/apps/heartbeat/models.py
create mode 100644 engine/apps/heartbeat/tasks.py
create mode 100644 engine/apps/heartbeat/tests/__init__.py
create mode 100644 engine/apps/heartbeat/tests/factories.py
create mode 100644 engine/apps/heartbeat/tests/test_integration_heartbeat.py
create mode 100644 engine/apps/integrations/__init__.py
create mode 100644 engine/apps/integrations/metadata/__init__.py
create mode 100644 engine/apps/integrations/metadata/configuration/alertmanager.py
create mode 100644 engine/apps/integrations/metadata/configuration/amazon_sns.py
create mode 100644 engine/apps/integrations/metadata/configuration/formatted_webhook.py
create mode 100644 engine/apps/integrations/metadata/configuration/grafana.py
create mode 100644 engine/apps/integrations/metadata/configuration/grafana_alerting.py
create mode 100644 engine/apps/integrations/metadata/configuration/heartbeat.py
create mode 100644 engine/apps/integrations/metadata/configuration/inbound_email.py
create mode 100644 engine/apps/integrations/metadata/configuration/maintenance.py
create mode 100644 engine/apps/integrations/metadata/configuration/manual.py
create mode 100644 engine/apps/integrations/metadata/configuration/slack_channel.py
create mode 100644 engine/apps/integrations/metadata/configuration/webhook.py
create mode 100644 engine/apps/integrations/metadata/heartbeat/__init__.py
create mode 100644 engine/apps/integrations/metadata/heartbeat/_heartbeat_text_creator.py
create mode 100644 engine/apps/integrations/metadata/heartbeat/alertmanager.py
create mode 100644 engine/apps/integrations/metadata/heartbeat/elastalert.py
create mode 100644 engine/apps/integrations/metadata/heartbeat/formatted_webhook.py
create mode 100644 engine/apps/integrations/metadata/heartbeat/grafana.py
create mode 100644 engine/apps/integrations/metadata/heartbeat/prtg.py
create mode 100644 engine/apps/integrations/metadata/heartbeat/webhook.py
create mode 100644 engine/apps/integrations/metadata/heartbeat/zabbix.py
create mode 100644 engine/apps/integrations/mixins/__init__.py
create mode 100644 engine/apps/integrations/mixins/alert_channel_defining_mixin.py
create mode 100644 engine/apps/integrations/mixins/browsable_instruction_mixin.py
create mode 100644 engine/apps/integrations/mixins/ratelimit_mixin.py
create mode 100644 engine/apps/integrations/tasks.py
create mode 100644 engine/apps/integrations/templates/heartbeat_instructions/alertmanager.html
create mode 100644 engine/apps/integrations/templates/heartbeat_instructions/elastalert.html
create mode 100644 engine/apps/integrations/templates/heartbeat_instructions/formatted_webhook.html
create mode 100644 engine/apps/integrations/templates/heartbeat_instructions/grafana.html
create mode 100644 engine/apps/integrations/templates/heartbeat_instructions/prtg.html
create mode 100644 engine/apps/integrations/templates/heartbeat_instructions/webhook.html
create mode 100644 engine/apps/integrations/templates/heartbeat_instructions/zabbix.html
create mode 100644 engine/apps/integrations/templates/heartbeat_link.html
create mode 100644 engine/apps/integrations/templates/html/integration_alertmanager.html
create mode 100644 engine/apps/integrations/templates/html/integration_amazon_sns.html
create mode 100644 engine/apps/integrations/templates/html/integration_curler.html
create mode 100644 engine/apps/integrations/templates/html/integration_datadog.html
create mode 100644 engine/apps/integrations/templates/html/integration_demo.html
create mode 100644 engine/apps/integrations/templates/html/integration_elastalert.html
create mode 100644 engine/apps/integrations/templates/html/integration_fabric.html
create mode 100644 engine/apps/integrations/templates/html/integration_formatted_webhook.html
create mode 100644 engine/apps/integrations/templates/html/integration_grafana.html
create mode 100644 engine/apps/integrations/templates/html/integration_grafana_alerting.html
create mode 100644 engine/apps/integrations/templates/html/integration_heartbeat.html
create mode 100644 engine/apps/integrations/templates/html/integration_inbound_email.html
create mode 100644 engine/apps/integrations/templates/html/integration_kapacitor.html
create mode 100644 engine/apps/integrations/templates/html/integration_manual.html
create mode 100644 engine/apps/integrations/templates/html/integration_newrelic.html
create mode 100644 engine/apps/integrations/templates/html/integration_pagerduty.html
create mode 100644 engine/apps/integrations/templates/html/integration_pingdom.html
create mode 100644 engine/apps/integrations/templates/html/integration_prtg.html
create mode 100644 engine/apps/integrations/templates/html/integration_sentry.html
create mode 100644 engine/apps/integrations/templates/html/integration_sentry_platform.html
create mode 100644 engine/apps/integrations/templates/html/integration_slack_channel.html
create mode 100644 engine/apps/integrations/templates/html/integration_stackdriver.html
create mode 100644 engine/apps/integrations/templates/html/integration_uptimerobot.html
create mode 100644 engine/apps/integrations/templates/html/integration_webhook.html
create mode 100644 engine/apps/integrations/templates/html/integration_zabbix.html
create mode 100644 engine/apps/integrations/templates/integration_link.html
create mode 100644 engine/apps/integrations/tests/__init__.py
create mode 100644 engine/apps/integrations/tests/test_heartbeat_metadata.py
create mode 100644 engine/apps/integrations/tests/test_ratelimit.py
create mode 100644 engine/apps/integrations/tests/test_tasks.py
create mode 100644 engine/apps/integrations/tests/test_views.py
create mode 100644 engine/apps/integrations/urls.py
create mode 100644 engine/apps/integrations/views.py
create mode 100644 engine/apps/migration_tool/__init__.py
create mode 100644 engine/apps/migration_tool/constants.py
create mode 100644 engine/apps/migration_tool/migrations/0001_squashed_initial.py
create mode 100644 engine/apps/migration_tool/migrations/0002_amixrmigrationtaskstatus_organization.py
create mode 100644 engine/apps/migration_tool/migrations/__init__.py
create mode 100644 engine/apps/migration_tool/models/__init__.py
create mode 100644 engine/apps/migration_tool/models/amixr_migration_task_status.py
create mode 100644 engine/apps/migration_tool/models/locked_alert.py
create mode 100644 engine/apps/migration_tool/tasks.py
create mode 100644 engine/apps/migration_tool/urls.py
create mode 100644 engine/apps/migration_tool/utils.py
create mode 100644 engine/apps/migration_tool/views/__init__.py
create mode 100644 engine/apps/migration_tool/views/customers_migration_tool.py
create mode 100644 engine/apps/oss_installation/__init__.py
create mode 100644 engine/apps/oss_installation/migrations/0001_squashed_initial.py
create mode 100644 engine/apps/oss_installation/migrations/__init__.py
create mode 100644 engine/apps/oss_installation/models/__init__.py
create mode 100644 engine/apps/oss_installation/models/heartbeat.py
create mode 100644 engine/apps/oss_installation/models/oss_installation.py
create mode 100644 engine/apps/oss_installation/tasks.py
create mode 100644 engine/apps/oss_installation/urls.py
create mode 100644 engine/apps/oss_installation/usage_stats.py
create mode 100644 engine/apps/oss_installation/utils.py
create mode 100644 engine/apps/oss_installation/views/__init__.py
create mode 100644 engine/apps/oss_installation/views/cloud_heartbeat_status.py
create mode 100644 engine/apps/public_api/__init__.py
create mode 100644 engine/apps/public_api/constants.py
create mode 100644 engine/apps/public_api/custom_renderers.py
create mode 100644 engine/apps/public_api/helpers.py
create mode 100644 engine/apps/public_api/serializers/__init__.py
create mode 100644 engine/apps/public_api/serializers/action.py
create mode 100644 engine/apps/public_api/serializers/alerts.py
create mode 100644 engine/apps/public_api/serializers/escalation_chains.py
create mode 100644 engine/apps/public_api/serializers/escalation_policies.py
create mode 100644 engine/apps/public_api/serializers/incidents.py
create mode 100644 engine/apps/public_api/serializers/integrations.py
create mode 100644 engine/apps/public_api/serializers/integtration_heartbeat.py
create mode 100644 engine/apps/public_api/serializers/maintenance.py
create mode 100644 engine/apps/public_api/serializers/on_call_shifts.py
create mode 100644 engine/apps/public_api/serializers/organizations.py
create mode 100644 engine/apps/public_api/serializers/personal_notification_rules.py
create mode 100644 engine/apps/public_api/serializers/resolution_notes.py
create mode 100644 engine/apps/public_api/serializers/routes.py
create mode 100644 engine/apps/public_api/serializers/schedules_base.py
create mode 100644 engine/apps/public_api/serializers/schedules_calendar.py
create mode 100644 engine/apps/public_api/serializers/schedules_ical.py
create mode 100644 engine/apps/public_api/serializers/schedules_polymorphic.py
create mode 100644 engine/apps/public_api/serializers/slack_channel.py
create mode 100644 engine/apps/public_api/serializers/teams.py
create mode 100644 engine/apps/public_api/serializers/user_groups.py
create mode 100644 engine/apps/public_api/serializers/users.py
create mode 100644 engine/apps/public_api/tests/__init__.py
create mode 100644 engine/apps/public_api/tests/conftest.py
create mode 100644 engine/apps/public_api/tests/test_alerts.py
create mode 100644 engine/apps/public_api/tests/test_custom_actions.py
create mode 100644 engine/apps/public_api/tests/test_demo_token/__init__.py
create mode 100644 engine/apps/public_api/tests/test_demo_token/test_alerts.py
create mode 100644 engine/apps/public_api/tests/test_demo_token/test_custom_actions.py
create mode 100644 engine/apps/public_api/tests/test_demo_token/test_escalation_policies.py
create mode 100644 engine/apps/public_api/tests/test_demo_token/test_incidents.py
create mode 100644 engine/apps/public_api/tests/test_demo_token/test_integrations.py
create mode 100644 engine/apps/public_api/tests/test_demo_token/test_on_call_shift.py
create mode 100644 engine/apps/public_api/tests/test_demo_token/test_personal_notification_rules.py
create mode 100644 engine/apps/public_api/tests/test_demo_token/test_resolution_notes.py
create mode 100644 engine/apps/public_api/tests/test_demo_token/test_routes.py
create mode 100644 engine/apps/public_api/tests/test_demo_token/test_schedules.py
create mode 100644 engine/apps/public_api/tests/test_demo_token/test_slack_channels.py
create mode 100644 engine/apps/public_api/tests/test_demo_token/test_user_groups.py
create mode 100644 engine/apps/public_api/tests/test_demo_token/test_users.py
create mode 100644 engine/apps/public_api/tests/test_escalation_chain.py
create mode 100644 engine/apps/public_api/tests/test_escalation_policies.py
create mode 100644 engine/apps/public_api/tests/test_incidents.py
create mode 100644 engine/apps/public_api/tests/test_integrations.py
create mode 100644 engine/apps/public_api/tests/test_maintenance.py
create mode 100644 engine/apps/public_api/tests/test_on_call_shifts.py
create mode 100644 engine/apps/public_api/tests/test_personal_notification_rules.py
create mode 100644 engine/apps/public_api/tests/test_ratelimit.py
create mode 100644 engine/apps/public_api/tests/test_resolution_notes.py
create mode 100644 engine/apps/public_api/tests/test_routes.py
create mode 100644 engine/apps/public_api/tests/test_schedule_export.py
create mode 100644 engine/apps/public_api/tests/test_schedules.py
create mode 100644 engine/apps/public_api/tests/test_slack_channels.py
create mode 100644 engine/apps/public_api/tests/test_teams.py
create mode 100644 engine/apps/public_api/tests/test_user_groups.py
create mode 100644 engine/apps/public_api/tests/test_users.py
create mode 100644 engine/apps/public_api/throttlers/__init__.py
create mode 100644 engine/apps/public_api/throttlers/user_throttle.py
create mode 100644 engine/apps/public_api/urls.py
create mode 100644 engine/apps/public_api/views/__init__.py
create mode 100644 engine/apps/public_api/views/action.py
create mode 100644 engine/apps/public_api/views/alerts.py
create mode 100644 engine/apps/public_api/views/escalation_chains.py
create mode 100644 engine/apps/public_api/views/escalation_policies.py
create mode 100644 engine/apps/public_api/views/incidents.py
create mode 100644 engine/apps/public_api/views/info.py
create mode 100644 engine/apps/public_api/views/integrations.py
create mode 100644 engine/apps/public_api/views/maintaiable_object_mixin.py
create mode 100644 engine/apps/public_api/views/on_call_shifts.py
create mode 100644 engine/apps/public_api/views/organizations.py
create mode 100644 engine/apps/public_api/views/personal_notifications.py
create mode 100644 engine/apps/public_api/views/resolution_notes.py
create mode 100644 engine/apps/public_api/views/routes.py
create mode 100644 engine/apps/public_api/views/schedules.py
create mode 100644 engine/apps/public_api/views/slack_channels.py
create mode 100644 engine/apps/public_api/views/teams.py
create mode 100644 engine/apps/public_api/views/user_groups.py
create mode 100644 engine/apps/public_api/views/users.py
create mode 100644 engine/apps/schedules/__init__.py
create mode 100644 engine/apps/schedules/admin.py
create mode 100644 engine/apps/schedules/ical_events/__init__.py
create mode 100644 engine/apps/schedules/ical_events/adapter/__init__.py
create mode 100644 engine/apps/schedules/ical_events/adapter/amixr_recurring_ical_events_adapter.py
create mode 100644 engine/apps/schedules/ical_events/adapter/recurring_ical_events_adapter.py
create mode 100644 engine/apps/schedules/ical_events/proxy/__init__.py
create mode 100644 engine/apps/schedules/ical_events/proxy/ical_proxy.py
create mode 100644 engine/apps/schedules/ical_utils.py
create mode 100644 engine/apps/schedules/migrations/0001_squashed_initial.py
create mode 100644 engine/apps/schedules/migrations/0002_squashed_initial.py
create mode 100644 engine/apps/schedules/migrations/__init__.py
create mode 100644 engine/apps/schedules/models/__init__.py
create mode 100644 engine/apps/schedules/models/custom_on_call_shift.py
create mode 100644 engine/apps/schedules/models/on_call_schedule.py
create mode 100644 engine/apps/schedules/tasks/__init__.py
create mode 100644 engine/apps/schedules/tasks/drop_cached_ical.py
create mode 100644 engine/apps/schedules/tasks/notify_about_empty_shifts_in_schedule.py
create mode 100644 engine/apps/schedules/tasks/notify_about_gaps_in_schedule.py
create mode 100644 engine/apps/schedules/tasks/refresh_ical_files.py
create mode 100644 engine/apps/schedules/tests/__init__.py
create mode 100644 engine/apps/schedules/tests/calendars/calendar_with_all_day_event.ics
create mode 100644 engine/apps/schedules/tests/calendars/calendar_with_edited_recurring_events.ics
create mode 100644 engine/apps/schedules/tests/calendars/calendar_with_recurring_event.ics
create mode 100644 engine/apps/schedules/tests/conftest.py
create mode 100644 engine/apps/schedules/tests/factories.py
create mode 100644 engine/apps/schedules/tests/test_amixr_users_in_ical.py
create mode 100644 engine/apps/schedules/tests/test_custom_on_call_shift.py
create mode 100644 engine/apps/schedules/tests/test_ical_proxy.py
create mode 100644 engine/apps/sendgridapp/__init__.py
create mode 100644 engine/apps/sendgridapp/constants.py
create mode 100644 engine/apps/sendgridapp/migrations/__init__.py
create mode 100644 engine/apps/sendgridapp/models.py
create mode 100644 engine/apps/sendgridapp/parse.py
create mode 100644 engine/apps/sendgridapp/permissions.py
create mode 100644 engine/apps/sendgridapp/templates/email_notification.html
create mode 100644 engine/apps/sendgridapp/templates/email_verification.html
create mode 100644 engine/apps/sendgridapp/tests/__init__.py
create mode 100644 engine/apps/sendgridapp/tests/factories.py
create mode 100644 engine/apps/sendgridapp/tests/test_emails.py
create mode 100644 engine/apps/sendgridapp/urls.py
create mode 100644 engine/apps/sendgridapp/verification_token.py
create mode 100644 engine/apps/sendgridapp/views.py
create mode 100644 engine/apps/slack/__init__.py
create mode 100644 engine/apps/slack/admin.py
create mode 100644 engine/apps/slack/constants.py
create mode 100644 engine/apps/slack/migrations/0001_squashed_initial.py
create mode 100644 engine/apps/slack/migrations/0002_squashed_initial.py
create mode 100644 engine/apps/slack/migrations/0003_squashed_create_demo_token_instances.py
create mode 100644 engine/apps/slack/migrations/__init__.py
create mode 100644 engine/apps/slack/models/__init__.py
create mode 100644 engine/apps/slack/models/slack_action_record.py
create mode 100644 engine/apps/slack/models/slack_channel.py
create mode 100644 engine/apps/slack/models/slack_message.py
create mode 100644 engine/apps/slack/models/slack_team_identity.py
create mode 100644 engine/apps/slack/models/slack_user_identity.py
create mode 100644 engine/apps/slack/models/slack_usergroup.py
create mode 100644 engine/apps/slack/representatives/__init__.py
create mode 100644 engine/apps/slack/representatives/alert_group_representative.py
create mode 100644 engine/apps/slack/representatives/user_representative.py
create mode 100644 engine/apps/slack/scenarios/__init__.py
create mode 100644 engine/apps/slack/scenarios/alertgroup_appearance.py
create mode 100644 engine/apps/slack/scenarios/distribute_alerts.py
create mode 100644 engine/apps/slack/scenarios/escalation_delivery.py
create mode 100644 engine/apps/slack/scenarios/notification_delivery.py
create mode 100644 engine/apps/slack/scenarios/onboarding.py
create mode 100644 engine/apps/slack/scenarios/profile_update.py
create mode 100644 engine/apps/slack/scenarios/public_menu.py
create mode 100644 engine/apps/slack/scenarios/resolution_note.py
create mode 100644 engine/apps/slack/scenarios/scenario_step.py
create mode 100644 engine/apps/slack/scenarios/schedules.py
create mode 100644 engine/apps/slack/scenarios/slack_channel.py
create mode 100644 engine/apps/slack/scenarios/slack_channel_integration.py
create mode 100644 engine/apps/slack/scenarios/slack_renderer.py
create mode 100644 engine/apps/slack/scenarios/slack_usergroup.py
create mode 100644 engine/apps/slack/scenarios/step_mixins.py
create mode 100644 engine/apps/slack/slack_client/__init__.py
create mode 100644 engine/apps/slack/slack_client/exceptions.py
create mode 100644 engine/apps/slack/slack_client/slack_client.py
create mode 100644 engine/apps/slack/slack_client/slack_client_server.py
create mode 100644 engine/apps/slack/slack_formatter.py
create mode 100644 engine/apps/slack/tasks.py
create mode 100644 engine/apps/slack/templates/admin/slack_teams_summary_change_list.html
create mode 100644 engine/apps/slack/tests/__init__.py
create mode 100644 engine/apps/slack/tests/conftest.py
create mode 100644 engine/apps/slack/tests/factories.py
create mode 100644 engine/apps/slack/tests/test_create_message_blocks.py
create mode 100644 engine/apps/slack/tests/test_parse_slack_usernames.py
create mode 100644 engine/apps/slack/tests/test_populate_slack_channels.py
create mode 100644 engine/apps/slack/tests/test_reset_slack.py
create mode 100644 engine/apps/slack/tests/test_scenario_steps/__init__.py
create mode 100644 engine/apps/slack/tests/test_scenario_steps/test_distribute_alerts.py
create mode 100644 engine/apps/slack/tests/test_scenario_steps/test_resolution_note.py
create mode 100644 engine/apps/slack/tests/test_scenario_steps/test_slack_usergroup_steps.py
create mode 100644 engine/apps/slack/tests/test_user_group.py
create mode 100644 engine/apps/slack/urls.py
create mode 100644 engine/apps/slack/utils.py
create mode 100644 engine/apps/slack/views.py
create mode 100644 engine/apps/social_auth/__init__.py
create mode 100644 engine/apps/social_auth/backends.py
create mode 100644 engine/apps/social_auth/live_setting_django_strategy.py
create mode 100644 engine/apps/social_auth/middlewares.py
create mode 100644 engine/apps/social_auth/pipeline.py
create mode 100644 engine/apps/social_auth/urls.py
create mode 100644 engine/apps/social_auth/views.py
create mode 100644 engine/apps/telegram/__init__.py
create mode 100644 engine/apps/telegram/alert_group_representative.py
create mode 100644 engine/apps/telegram/apps.py
create mode 100644 engine/apps/telegram/client.py
create mode 100644 engine/apps/telegram/decorators.py
create mode 100644 engine/apps/telegram/migrations/0001_squashed_initial.py
create mode 100644 engine/apps/telegram/migrations/__init__.py
create mode 100644 engine/apps/telegram/models/__init__.py
create mode 100644 engine/apps/telegram/models/connectors/__init__.py
create mode 100644 engine/apps/telegram/models/connectors/channel.py
create mode 100644 engine/apps/telegram/models/connectors/personal.py
create mode 100644 engine/apps/telegram/models/message.py
create mode 100644 engine/apps/telegram/models/verification/__init__.py
create mode 100644 engine/apps/telegram/models/verification/channel.py
create mode 100644 engine/apps/telegram/models/verification/personal.py
create mode 100644 engine/apps/telegram/renderers/__init__.py
create mode 100644 engine/apps/telegram/renderers/keyboard.py
create mode 100644 engine/apps/telegram/renderers/message.py
create mode 100644 engine/apps/telegram/signals.py
create mode 100644 engine/apps/telegram/tasks.py
create mode 100644 engine/apps/telegram/tests/__init__.py
create mode 100644 engine/apps/telegram/tests/factories.py
create mode 100644 engine/apps/telegram/tests/test_keyboard_renderer.py
create mode 100644 engine/apps/telegram/tests/test_message_renderer.py
create mode 100644 engine/apps/telegram/tests/test_models.py
create mode 100644 engine/apps/telegram/tests/test_update_handlers.py
create mode 100644 engine/apps/telegram/updates/__init__.py
create mode 100644 engine/apps/telegram/updates/update_handlers/__init__.py
create mode 100644 engine/apps/telegram/updates/update_handlers/button_press.py
create mode 100644 engine/apps/telegram/updates/update_handlers/channel_to_group_forward.py
create mode 100644 engine/apps/telegram/updates/update_handlers/start_message.py
create mode 100644 engine/apps/telegram/updates/update_handlers/update_handler.py
create mode 100644 engine/apps/telegram/updates/update_handlers/verification/__init__.py
create mode 100644 engine/apps/telegram/updates/update_handlers/verification/channel.py
create mode 100644 engine/apps/telegram/updates/update_handlers/verification/personal.py
create mode 100644 engine/apps/telegram/updates/update_manager.py
create mode 100644 engine/apps/telegram/urls.py
create mode 100644 engine/apps/telegram/utils.py
create mode 100644 engine/apps/telegram/views.py
create mode 100644 engine/apps/twilioapp/__init__.py
create mode 100644 engine/apps/twilioapp/admin.py
create mode 100644 engine/apps/twilioapp/constants.py
create mode 100644 engine/apps/twilioapp/migrations/0001_squashed_initial.py
create mode 100644 engine/apps/twilioapp/migrations/__init__.py
create mode 100644 engine/apps/twilioapp/models/__init__.py
create mode 100644 engine/apps/twilioapp/models/phone_call.py
create mode 100644 engine/apps/twilioapp/models/sms_message.py
create mode 100644 engine/apps/twilioapp/models/twilio_log_record.py
create mode 100644 engine/apps/twilioapp/phone_manager.py
create mode 100644 engine/apps/twilioapp/tests/__init__.py
create mode 100644 engine/apps/twilioapp/tests/factories.py
create mode 100644 engine/apps/twilioapp/tests/test_phone_calls.py
create mode 100644 engine/apps/twilioapp/tests/test_sms_message.py
create mode 100644 engine/apps/twilioapp/twilio_client.py
create mode 100644 engine/apps/twilioapp/urls.py
create mode 100644 engine/apps/twilioapp/utils.py
create mode 100644 engine/apps/twilioapp/views.py
create mode 100644 engine/apps/user_management/__init__.py
create mode 100644 engine/apps/user_management/admin.py
create mode 100644 engine/apps/user_management/migrations/0001_squashed_initial.py
create mode 100644 engine/apps/user_management/migrations/0002_squashed_create_demo_token_instances.py
create mode 100644 engine/apps/user_management/migrations/__init__.py
create mode 100644 engine/apps/user_management/models/__init__.py
create mode 100644 engine/apps/user_management/models/organization.py
create mode 100644 engine/apps/user_management/models/team.py
create mode 100644 engine/apps/user_management/models/user.py
create mode 100644 engine/apps/user_management/organization_log_creator/__init__.py
create mode 100644 engine/apps/user_management/organization_log_creator/create_organization_log.py
create mode 100644 engine/apps/user_management/organization_log_creator/organization_log_type.py
create mode 100644 engine/apps/user_management/subscription_strategy/__init__.py
create mode 100644 engine/apps/user_management/subscription_strategy/base_subsription_strategy.py
create mode 100644 engine/apps/user_management/subscription_strategy/free_public_beta_subscription_strategy.py
create mode 100644 engine/apps/user_management/sync.py
create mode 100644 engine/apps/user_management/tests/__init__.py
create mode 100644 engine/apps/user_management/tests/factories.py
create mode 100644 engine/apps/user_management/tests/test_free_public_beta_subcription_strategy.py
create mode 100644 engine/apps/user_management/tests/test_organization.py
create mode 100644 engine/apps/user_management/tests/test_sync.py
create mode 100644 engine/apps/user_management/tests/test_user.py
create mode 100644 engine/apps/user_management/user_representative.py
create mode 100755 engine/celery_with_exporter.sh
create mode 100644 engine/common/__init__.py
create mode 100644 engine/common/admin.py
create mode 100644 engine/common/api_helpers/__init__.py
create mode 100644 engine/common/api_helpers/custom_fields.py
create mode 100644 engine/common/api_helpers/exceptions.py
create mode 100644 engine/common/api_helpers/filters.py
create mode 100644 engine/common/api_helpers/mixins.py
create mode 100644 engine/common/api_helpers/optional_slash_router.py
create mode 100644 engine/common/api_helpers/paginators.py
create mode 100644 engine/common/api_helpers/utils.py
create mode 100644 engine/common/constants/__init__.py
create mode 100644 engine/common/constants/role.py
create mode 100644 engine/common/constants/slack_auth.py
create mode 100644 engine/common/custom_celery_tasks/__init__.py
create mode 100644 engine/common/custom_celery_tasks/create_alert_base_task.py
create mode 100644 engine/common/custom_celery_tasks/dedicated_queue_retry_task.py
create mode 100644 engine/common/custom_celery_tasks/safe_to_broker_outage_task.py
create mode 100644 engine/common/exceptions/__init__.py
create mode 100644 engine/common/exceptions/exceptions.py
create mode 100644 engine/common/jinja_templater/__init__.py
create mode 100644 engine/common/jinja_templater/apply_jinja_template.py
create mode 100644 engine/common/jinja_templater/filters.py
create mode 100644 engine/common/jinja_templater/jinja_template_env.py
create mode 100644 engine/common/mixins/use_random_readonly_db_manager_mixin.py
create mode 100644 engine/common/public_primary_keys.py
create mode 100644 engine/common/tests/__init__.py
create mode 100644 engine/common/tests/test_clean_markup.py
create mode 100644 engine/common/tests/test_urlize.py
create mode 100644 engine/common/utils.py
create mode 100644 engine/conftest.py
create mode 100644 engine/engine/__init__.py
create mode 100644 engine/engine/celery.py
create mode 100644 engine/engine/logging/formatters.py
create mode 100644 engine/engine/management/commands/issue_invite_for_the_frontend.py
create mode 100644 engine/engine/management/commands/restart_escalation.py
create mode 100644 engine/engine/management/commands/start_celery.py
create mode 100644 engine/engine/middlewares.py
create mode 100644 engine/engine/parsers.py
create mode 100644 engine/engine/urls.py
create mode 100644 engine/engine/views.py
create mode 100644 engine/engine/wsgi.py
create mode 100755 engine/manage.py
create mode 100644 engine/pyproject.toml
create mode 100644 engine/requirements.txt
create mode 100644 engine/scripts/start_all_in_one.sh
create mode 100644 engine/settings/__init__.py
create mode 100644 engine/settings/all_in_one.py
create mode 100644 engine/settings/base.py
create mode 100644 engine/settings/ci-test.py
create mode 100644 engine/settings/dev.py
create mode 100644 engine/settings/prod_without_db.py
create mode 100644 engine/static/images/heartbeat_instructions/heartbeat_grafana_1.png
create mode 100644 engine/static/images/heartbeat_instructions/heartbeat_grafana_2.png
create mode 100644 engine/static/images/heartbeat_instructions/heartbeat_grafana_3.png
create mode 100644 engine/static/images/heartbeat_instructions/heartbeat_grafana_4.png
create mode 100644 engine/static/images/heartbeat_instructions/heartbeat_grafana_5.png
create mode 100644 engine/static/images/heartbeat_instructions/heartbeat_zabbix_1.png
create mode 100644 engine/static/images/heartbeat_instructions/heartbeat_zabbix_2.png
create mode 100644 engine/static/images/heartbeat_instructions/heartbeat_zabbix_3.png
create mode 100644 engine/static/images/heartbeat_instructions/heartbeat_zabbix_4.png
create mode 100644 engine/static/images/heartbeat_instructions/heartbeat_zabbix_5.png
create mode 100644 engine/static/images/postmortem.gif
create mode 100644 engine/tox.ini
create mode 100644 engine/uwsgi.ini
create mode 100755 engine/wait_for_test_mysql_start.sh
create mode 100644 grafana-plugin/.eslintignore
create mode 100644 grafana-plugin/.eslintrc.js
create mode 100644 grafana-plugin/.gitignore
create mode 100644 grafana-plugin/.prettierrc.js
create mode 100644 grafana-plugin/.release-it.js
create mode 100644 grafana-plugin/.stylelintignore
create mode 100644 grafana-plugin/.stylelintrc
create mode 120000 grafana-plugin/CHANGELOG.md
create mode 100644 grafana-plugin/LICENSE
create mode 100644 grafana-plugin/e2e/features/add-channel-filter.feature
create mode 100644 grafana-plugin/e2e/features/delete-notification-steps.feature
create mode 100644 grafana-plugin/e2e/features/steps/addChannelFilter.js
create mode 100644 grafana-plugin/e2e/features/steps/common.js
create mode 100644 grafana-plugin/e2e/features/steps/deleteNotificationSteps.js
create mode 100644 grafana-plugin/e2e/features/support/world.js
create mode 100644 grafana-plugin/e2e/utils/takeScreenshot.js
create mode 100644 grafana-plugin/grafana-plugin.yml.example
create mode 100644 grafana-plugin/jest.config.js
create mode 100644 grafana-plugin/package.json
create mode 100644 grafana-plugin/plopfile.js
create mode 100644 grafana-plugin/provisioning/.gitignore
create mode 100644 grafana-plugin/src/GrafanaPluginRootPage.tsx
create mode 100644 grafana-plugin/src/README.md
create mode 100755 grafana-plugin/src/assets/fonts/nucleo/nucleo-icons.eot
create mode 100755 grafana-plugin/src/assets/fonts/nucleo/nucleo-icons.svg
create mode 100755 grafana-plugin/src/assets/fonts/nucleo/nucleo-icons.ttf
create mode 100755 grafana-plugin/src/assets/fonts/nucleo/nucleo-icons.woff
create mode 100755 grafana-plugin/src/assets/fonts/nucleo/nucleo-icons.woff2
create mode 100644 grafana-plugin/src/assets/img/ElastAlert.svg
create mode 100644 grafana-plugin/src/assets/img/HeartBeatMonitoring.png
create mode 100644 grafana-plugin/src/assets/img/PagerDuty.png
create mode 100644 grafana-plugin/src/assets/img/arrows/arrow_07.svg
create mode 100644 grafana-plugin/src/assets/img/arrows/arrow_12.svg
create mode 100644 grafana-plugin/src/assets/img/arrows/arrow_35.svg
create mode 100644 grafana-plugin/src/assets/img/arrows/arrow_43.svg
create mode 100644 grafana-plugin/src/assets/img/brand/amixr-logo-blue.png
create mode 100644 grafana-plugin/src/assets/img/brand/amixr-logo-blue.svg
create mode 100644 grafana-plugin/src/assets/img/brand/amixr-logo.png
create mode 100755 grafana-plugin/src/assets/img/brand/argon-react-white.png
create mode 100755 grafana-plugin/src/assets/img/brand/argon-react.png
create mode 100755 grafana-plugin/src/assets/img/brand/blue.png
create mode 100755 grafana-plugin/src/assets/img/brand/favicon.png
create mode 100755 grafana-plugin/src/assets/img/brand/white.png
create mode 100644 grafana-plugin/src/assets/img/events_instructions.png
create mode 100644 grafana-plugin/src/assets/img/grafana_icon.svg
create mode 100644 grafana-plugin/src/assets/img/howto-phone.png
create mode 100644 grafana-plugin/src/assets/img/icons/common/avatar.svg
create mode 100755 grafana-plugin/src/assets/img/icons/common/github.svg
create mode 100755 grafana-plugin/src/assets/img/icons/common/google.svg
create mode 100644 grafana-plugin/src/assets/img/icons/common/slack.svg
create mode 100644 grafana-plugin/src/assets/img/icons/curler-icon.svg
create mode 100644 grafana-plugin/src/assets/img/icons/slack-colored.svg
create mode 100644 grafana-plugin/src/assets/img/integration-logos.png
create mode 100644 grafana-plugin/src/assets/img/prometheus_logo_grey.svg
create mode 100644 grafana-plugin/src/assets/img/slack_landing/slack_ai_suggestion.png
create mode 100644 grafana-plugin/src/assets/img/slack_landing/slack_install_permissions.png
create mode 100644 grafana-plugin/src/assets/img/slack_landing/slack_invitation_message.png
create mode 100644 grafana-plugin/src/assets/img/slack_landing/slack_log_and_plan_message.png
create mode 100644 grafana-plugin/src/assets/img/slack_landing/slack_main_message.png
create mode 100644 grafana-plugin/src/assets/img/slack_landing/slack_ok_message.png
create mode 100644 grafana-plugin/src/assets/img/slack_landing/slack_oncall_schedule_reminder.png
create mode 100644 grafana-plugin/src/assets/img/slack_landing/slack_postmortem_message.png
create mode 100644 grafana-plugin/src/assets/img/telegram.svg
create mode 100644 grafana-plugin/src/assets/img/telegram_channel_instruction_images/1.png
create mode 100644 grafana-plugin/src/assets/img/telegram_channel_instruction_images/2.png
create mode 100644 grafana-plugin/src/assets/img/telegram_channel_instruction_images/3.png
create mode 100644 grafana-plugin/src/assets/img/telegram_channel_instruction_images/4.png
create mode 100644 grafana-plugin/src/assets/img/telegram_channel_instruction_images/6.png
create mode 100644 grafana-plugin/src/assets/img/telegram_channel_instruction_images/7.png
create mode 100644 grafana-plugin/src/assets/img/telegram_channel_instruction_images/8.png
create mode 100644 grafana-plugin/src/assets/img/telegram_channel_instruction_images/9.png
create mode 100644 grafana-plugin/src/assets/img/telegram_channel_instruction_images/doge-meme-telegram.jpg
create mode 100755 grafana-plugin/src/assets/img/theme/angular.jpg
create mode 100755 grafana-plugin/src/assets/img/theme/bootstrap.jpg
create mode 100644 grafana-plugin/src/assets/img/theme/forrest.png
create mode 100755 grafana-plugin/src/assets/img/theme/profile-cover.jpg
create mode 100755 grafana-plugin/src/assets/img/theme/react.jpg
create mode 100755 grafana-plugin/src/assets/img/theme/sketch.jpg
create mode 100755 grafana-plugin/src/assets/img/theme/team-1-800x800.jpg
create mode 100755 grafana-plugin/src/assets/img/theme/team-2-800x800.jpg
create mode 100755 grafana-plugin/src/assets/img/theme/team-3-800x800.jpg
create mode 100755 grafana-plugin/src/assets/img/theme/team-4-800x800.jpg
create mode 100755 grafana-plugin/src/assets/img/theme/vue.jpg
create mode 100644 grafana-plugin/src/assets/img/users.svg
create mode 100644 grafana-plugin/src/components/AlertTemplates/AlertTemplatesForm.config.ts
create mode 100644 grafana-plugin/src/components/AlertTemplates/AlertTemplatesForm.helper.tsx
create mode 100644 grafana-plugin/src/components/AlertTemplates/AlertTemplatesForm.module.css
create mode 100644 grafana-plugin/src/components/AlertTemplates/AlertTemplatesForm.tsx
create mode 100644 grafana-plugin/src/components/Avatar/Avatar.module.css
create mode 100644 grafana-plugin/src/components/Avatar/Avatar.tsx
create mode 100644 grafana-plugin/src/components/CardButton/CardButton.module.css
create mode 100644 grafana-plugin/src/components/CardButton/CardButton.tsx
create mode 100644 grafana-plugin/src/components/Collapse/Collapse.module.css
create mode 100644 grafana-plugin/src/components/Collapse/Collapse.tsx
create mode 100644 grafana-plugin/src/components/EscalationsFilters/EscalationsFilters.module.css
create mode 100644 grafana-plugin/src/components/EscalationsFilters/EscalationsFilters.tsx
create mode 100644 grafana-plugin/src/components/GBlock/Block.module.css
create mode 100644 grafana-plugin/src/components/GBlock/Block.tsx
create mode 100644 grafana-plugin/src/components/GForm/GForm.module.css
create mode 100644 grafana-plugin/src/components/GForm/GForm.tsx
create mode 100644 grafana-plugin/src/components/GForm/GForm.types.ts
create mode 100644 grafana-plugin/src/components/GList/GList.module.css
create mode 100644 grafana-plugin/src/components/GList/GList.tsx
create mode 100644 grafana-plugin/src/components/GTable/GTable.module.css
create mode 100644 grafana-plugin/src/components/GTable/GTable.tsx
create mode 100644 grafana-plugin/src/components/IntegrationLogo/IntegrationLogo.config.ts
create mode 100644 grafana-plugin/src/components/IntegrationLogo/IntegrationLogo.module.css
create mode 100644 grafana-plugin/src/components/IntegrationLogo/IntegrationLogo.tsx
create mode 100644 grafana-plugin/src/components/IntegrationsFilters/IntegrationsFilters.module.css
create mode 100644 grafana-plugin/src/components/IntegrationsFilters/IntegrationsFilters.tsx
create mode 100644 grafana-plugin/src/components/MonacoJinja2Editor/MonacoJinja2Editor.module.css
create mode 100644 grafana-plugin/src/components/MonacoJinja2Editor/MonacoJinja2Editor.tsx
create mode 100644 grafana-plugin/src/components/MonacoJinja2Editor/jinja2.ts
create mode 100644 grafana-plugin/src/components/PluginLink/PluginLink.module.css
create mode 100644 grafana-plugin/src/components/PluginLink/PluginLink.tsx
create mode 100644 grafana-plugin/src/components/Policy/DragHandle.tsx
create mode 100644 grafana-plugin/src/components/Policy/EscalationPolicy.module.css
create mode 100644 grafana-plugin/src/components/Policy/EscalationPolicy.tsx
create mode 100644 grafana-plugin/src/components/Policy/NotificationPolicy.module.css
create mode 100644 grafana-plugin/src/components/Policy/NotificationPolicy.tsx
create mode 100644 grafana-plugin/src/components/Policy/Policy.module.css
create mode 100644 grafana-plugin/src/components/Policy/PolicyNote.tsx
create mode 100644 grafana-plugin/src/components/SchedulesFilters/SchedulesFilters.helpers.ts
create mode 100644 grafana-plugin/src/components/SchedulesFilters/SchedulesFilters.module.css
create mode 100644 grafana-plugin/src/components/SchedulesFilters/SchedulesFilters.tsx
create mode 100644 grafana-plugin/src/components/SchedulesFilters/SchedulesFilters.types.ts
create mode 100644 grafana-plugin/src/components/SortableList/SortableList.tsx
create mode 100644 grafana-plugin/src/components/SourceCode/SourceCode.module.css
create mode 100644 grafana-plugin/src/components/SourceCode/SourceCode.tsx
create mode 100644 grafana-plugin/src/components/Tag/Tag.module.css
create mode 100644 grafana-plugin/src/components/Tag/Tag.tsx
create mode 100644 grafana-plugin/src/components/Text/Text.module.css
create mode 100644 grafana-plugin/src/components/Text/Text.tsx
create mode 100644 grafana-plugin/src/components/TimeRange/TimeRange.module.css
create mode 100644 grafana-plugin/src/components/TimeRange/TimeRange.tsx
create mode 100644 grafana-plugin/src/components/Timeline/Timeline.module.css
create mode 100644 grafana-plugin/src/components/Timeline/Timeline.tsx
create mode 100644 grafana-plugin/src/components/Timeline/TimelineItem.tsx
create mode 100644 grafana-plugin/src/components/Tutorial/Tutorial.module.css
create mode 100644 grafana-plugin/src/components/Tutorial/Tutorial.tsx
create mode 100644 grafana-plugin/src/components/Tutorial/Tutorial.types.ts
create mode 100644 grafana-plugin/src/components/Tutorial/icons/bell-icon.svg
create mode 100644 grafana-plugin/src/components/Tutorial/icons/calendar-icon.svg
create mode 100644 grafana-plugin/src/components/Tutorial/icons/chat-icon.svg
create mode 100644 grafana-plugin/src/components/Tutorial/icons/escalation-icon.svg
create mode 100644 grafana-plugin/src/components/Tutorial/icons/integration-icon.svg
create mode 100644 grafana-plugin/src/components/UsersFilters/UsersFilters.module.css
create mode 100644 grafana-plugin/src/components/UsersFilters/UsersFilters.tsx
create mode 100644 grafana-plugin/src/components/VerticalTabsBar/VerticalTabsBar.module.css
create mode 100644 grafana-plugin/src/components/VerticalTabsBar/VerticalTabsBar.tsx
create mode 100644 grafana-plugin/src/components/WithConfirm/WithConfirm.module.css
create mode 100644 grafana-plugin/src/components/WithConfirm/WithConfirm.tsx
create mode 100644 grafana-plugin/src/containers/AlertReceiveChannelCard/AlertReceiveChannelCard.module.css
create mode 100644 grafana-plugin/src/containers/AlertReceiveChannelCard/AlertReceiveChannelCard.tsx
create mode 100644 grafana-plugin/src/containers/AlertRules/AlertRules.helpers.ts
create mode 100644 grafana-plugin/src/containers/AlertRules/AlertRules.module.css
create mode 100644 grafana-plugin/src/containers/AlertRules/AlertRules.tsx
create mode 100644 grafana-plugin/src/containers/AlertRules/parts/connectors/SlackConnector.tsx
create mode 100644 grafana-plugin/src/containers/AlertRules/parts/connectors/TelegramConnector.tsx
create mode 100644 grafana-plugin/src/containers/AlertRules/parts/connectors/index.module.css
create mode 100644 grafana-plugin/src/containers/AlertRules/parts/index.module.css
create mode 100644 grafana-plugin/src/containers/AlertRules/parts/index.tsx
create mode 100644 grafana-plugin/src/containers/AlertTemplatesFormContainer/AlertTemplatesFormContainer.tsx
create mode 100644 grafana-plugin/src/containers/ApiTokenSettings/ApiTokenForm.tsx
create mode 100644 grafana-plugin/src/containers/ApiTokenSettings/ApiTokenSettings.module.css
create mode 100644 grafana-plugin/src/containers/ApiTokenSettings/ApiTokenSettings.tsx
create mode 100644 grafana-plugin/src/containers/AttachIncidentForm/AttachIncidentForm.module.css
create mode 100644 grafana-plugin/src/containers/AttachIncidentForm/AttachIncidentForm.tsx
create mode 100644 grafana-plugin/src/containers/ChannelFilterForm/ChannelFilterForm.module.css
create mode 100644 grafana-plugin/src/containers/ChannelFilterForm/ChannelFilterForm.tsx
create mode 100644 grafana-plugin/src/containers/CreateAlertReceiveChannelContainer/CreateAlertReceiveChannelContainer.module.css
create mode 100644 grafana-plugin/src/containers/CreateAlertReceiveChannelContainer/CreateAlertReceiveChannelContainer.tsx
create mode 100644 grafana-plugin/src/containers/DefaultPageLayout/DefaultPageLayout.helpers.tsx
create mode 100644 grafana-plugin/src/containers/DefaultPageLayout/DefaultPageLayout.module.css
create mode 100644 grafana-plugin/src/containers/DefaultPageLayout/DefaultPageLayout.tsx
create mode 100644 grafana-plugin/src/containers/DefaultPageLayout/DefaultPageLayout.types.ts
create mode 100644 grafana-plugin/src/containers/EscalationChainCard/EscalationChainCard.module.css
create mode 100644 grafana-plugin/src/containers/EscalationChainCard/EscalationChainCard.tsx
create mode 100644 grafana-plugin/src/containers/EscalationChainForm/EscalationChainForm.module.css
create mode 100644 grafana-plugin/src/containers/EscalationChainForm/EscalationChainForm.tsx
create mode 100644 grafana-plugin/src/containers/EscalationChainSteps/EscalationChainSteps.module.css
create mode 100644 grafana-plugin/src/containers/EscalationChainSteps/EscalationChainSteps.tsx
create mode 100644 grafana-plugin/src/containers/GSelect/GSelect.module.css
create mode 100644 grafana-plugin/src/containers/GSelect/GSelect.tsx
create mode 100644 grafana-plugin/src/containers/GrafanaTeamSelect/GrafanaTeamSelect.module.css
create mode 100644 grafana-plugin/src/containers/GrafanaTeamSelect/GrafanaTeamSelect.tsx
create mode 100644 grafana-plugin/src/containers/HeartbeatModal/HeartbeatForm.module.css
create mode 100644 grafana-plugin/src/containers/HeartbeatModal/HeartbeatForm.tsx
create mode 100644 grafana-plugin/src/containers/IncidentMatcher/IncidentMatcher.module.css
create mode 100644 grafana-plugin/src/containers/IncidentMatcher/IncidentMatcher.tsx
create mode 100644 grafana-plugin/src/containers/IncidentsFilters/IncidentFilters.helpers.ts
create mode 100644 grafana-plugin/src/containers/IncidentsFilters/IncidentFilters.types.ts
create mode 100644 grafana-plugin/src/containers/IncidentsFilters/IncidentsFilters.module.css
create mode 100644 grafana-plugin/src/containers/IncidentsFilters/IncidentsFilters.tsx
create mode 100644 grafana-plugin/src/containers/IntegrationSettings/IntegrationSettings.module.css
create mode 100644 grafana-plugin/src/containers/IntegrationSettings/IntegrationSettings.tsx
create mode 100644 grafana-plugin/src/containers/IntegrationSettings/IntegrationSettings.types.ts
create mode 100644 grafana-plugin/src/containers/IntegrationSettings/parts/Autoresolve.module.css
create mode 100644 grafana-plugin/src/containers/IntegrationSettings/parts/Autoresolve.tsx
create mode 100644 grafana-plugin/src/containers/IntegrationSettings/parts/LiveLogs.tsx
create mode 100644 grafana-plugin/src/containers/MaintenanceForm/MaintenanceForm.config.tsx
create mode 100644 grafana-plugin/src/containers/MaintenanceForm/MaintenanceForm.helpers.ts
create mode 100644 grafana-plugin/src/containers/MaintenanceForm/MaintenanceForm.module.css
create mode 100644 grafana-plugin/src/containers/MaintenanceForm/MaintenanceForm.tsx
create mode 100644 grafana-plugin/src/containers/MobileAppVerification/MobileAppVerification.module.css
create mode 100644 grafana-plugin/src/containers/MobileAppVerification/MobileAppVerification.tsx
create mode 100644 grafana-plugin/src/containers/OrganizationLogFilters/OrganizationLogFilters.module.css
create mode 100644 grafana-plugin/src/containers/OrganizationLogFilters/OrganizationLogFilters.tsx
create mode 100644 grafana-plugin/src/containers/OutgoingWebhookForm/OutgoingWebhookForm.config.ts
create mode 100644 grafana-plugin/src/containers/OutgoingWebhookForm/OutgoingWebhookForm.module.css
create mode 100644 grafana-plugin/src/containers/OutgoingWebhookForm/OutgoingWebhookForm.tsx
create mode 100644 grafana-plugin/src/containers/PersonalNotificationSettings/PersonalNotificationSettings.helpers.ts
create mode 100644 grafana-plugin/src/containers/PersonalNotificationSettings/PersonalNotificationSettings.module.css
create mode 100644 grafana-plugin/src/containers/PersonalNotificationSettings/PersonalNotificationSettings.tsx
create mode 100644 grafana-plugin/src/containers/PersonalNotificationSettings/img/default-step.png
create mode 100644 grafana-plugin/src/containers/PluginConfigPage/PluginConfigPage.module.css
create mode 100644 grafana-plugin/src/containers/PluginConfigPage/PluginConfigPage.tsx
create mode 100644 grafana-plugin/src/containers/RemoteSelect/RemoteSelect.module.css
create mode 100644 grafana-plugin/src/containers/RemoteSelect/RemoteSelect.tsx
create mode 100644 grafana-plugin/src/containers/ScheduleForm/ScheduleForm.config.ts
create mode 100644 grafana-plugin/src/containers/ScheduleForm/ScheduleForm.helpers.ts
create mode 100644 grafana-plugin/src/containers/ScheduleForm/ScheduleForm.module.css
create mode 100644 grafana-plugin/src/containers/ScheduleForm/ScheduleForm.tsx
create mode 100644 grafana-plugin/src/containers/ScheduleIcalLink/ScheduleIcalLink.module.css
create mode 100644 grafana-plugin/src/containers/ScheduleIcalLink/ScheduleIcalLink.tsx
create mode 100644 grafana-plugin/src/containers/SlackIntegrationButton/SlackIntegrationButton.tsx
create mode 100644 grafana-plugin/src/containers/TelegramIntegrationButton/TelegramIntegrationButton.module.css
create mode 100644 grafana-plugin/src/containers/TelegramIntegrationButton/TelegramIntegrationButton.tsx
create mode 100644 grafana-plugin/src/containers/TemplatePreview/TemplatePreview.module.css
create mode 100644 grafana-plugin/src/containers/TemplatePreview/TemplatePreview.tsx
create mode 100644 grafana-plugin/src/containers/UserSettings/UserSettings.module.css
create mode 100644 grafana-plugin/src/containers/UserSettings/UserSettings.tsx
create mode 100644 grafana-plugin/src/containers/UserSettings/UserSettings.types.ts
create mode 100644 grafana-plugin/src/containers/UserSettings/parts/connectors/ICalConnector.tsx
create mode 100644 grafana-plugin/src/containers/UserSettings/parts/connectors/MobileAppConnector.tsx
create mode 100644 grafana-plugin/src/containers/UserSettings/parts/connectors/PhoneConnector.tsx
create mode 100644 grafana-plugin/src/containers/UserSettings/parts/connectors/SlackConnector.tsx
create mode 100644 grafana-plugin/src/containers/UserSettings/parts/connectors/TelegramConnector.tsx
create mode 100644 grafana-plugin/src/containers/UserSettings/parts/connectors/index.module.css
create mode 100644 grafana-plugin/src/containers/UserSettings/parts/connectors/index.tsx
create mode 100644 grafana-plugin/src/containers/UserSettings/parts/index.module.css
create mode 100644 grafana-plugin/src/containers/UserSettings/parts/index.tsx
create mode 100644 grafana-plugin/src/containers/UserSettings/parts/tabs/NotificationSettingsTab.tsx
create mode 100644 grafana-plugin/src/containers/UserSettings/parts/tabs/PhoneVerification/PhoneVerification.module.css
create mode 100644 grafana-plugin/src/containers/UserSettings/parts/tabs/PhoneVerification/PhoneVerification.tsx
create mode 100644 grafana-plugin/src/containers/UserSettings/parts/tabs/SlackTab/SlackTab.module.css
create mode 100644 grafana-plugin/src/containers/UserSettings/parts/tabs/SlackTab/SlackTab.tsx
create mode 100644 grafana-plugin/src/containers/UserSettings/parts/tabs/TelegramInfo/TelegramInfo.module.css
create mode 100644 grafana-plugin/src/containers/UserSettings/parts/tabs/TelegramInfo/TelegramInfo.tsx
create mode 100644 grafana-plugin/src/containers/UserSettings/parts/tabs/UserInfoTab/UserInfoTab.module.css
create mode 100644 grafana-plugin/src/containers/UserSettings/parts/tabs/UserInfoTab/UserInfoTab.tsx
create mode 100644 grafana-plugin/src/containers/UserSummary/UserSummary.tsx
create mode 100644 grafana-plugin/src/containers/UserTooltip/UserTooltip.module.css
create mode 100644 grafana-plugin/src/containers/UserTooltip/UserTooltip.tsx
create mode 100644 grafana-plugin/src/containers/WithPermissionControl/WithPermissionControl.module.css
create mode 100644 grafana-plugin/src/containers/WithPermissionControl/WithPermissionControl.tsx
create mode 100644 grafana-plugin/src/declare/index.d.ts
create mode 100644 grafana-plugin/src/icons/grafana-icon.svg
create mode 100644 grafana-plugin/src/icons/heart-green.svg
create mode 100644 grafana-plugin/src/icons/heart-red.svg
create mode 100644 grafana-plugin/src/icons/index.tsx
create mode 100644 grafana-plugin/src/img/logo.svg
create mode 100644 grafana-plugin/src/img/screenshot.png
create mode 100644 grafana-plugin/src/img/slack_workspace_choose_attention.png
create mode 100644 grafana-plugin/src/img/telegram_discussion.png
create mode 100644 grafana-plugin/src/index.css
create mode 100644 grafana-plugin/src/index.d.ts
create mode 100644 grafana-plugin/src/interceptors/index.ts
create mode 100644 grafana-plugin/src/models/action.ts
create mode 100644 grafana-plugin/src/models/alert_receive_channel.ts
create mode 100644 grafana-plugin/src/models/alert_receive_channel/alert_receive_channel.helpers.ts
create mode 100644 grafana-plugin/src/models/alert_receive_channel/alert_receive_channel.ts
create mode 100644 grafana-plugin/src/models/alert_receive_channel/alert_receive_channel.types.ts
create mode 100644 grafana-plugin/src/models/alert_receive_channel_filters/alert_receive_channel_filters.ts
create mode 100644 grafana-plugin/src/models/alert_templates.ts
create mode 100644 grafana-plugin/src/models/alertgroup/alertgroup.ts
create mode 100644 grafana-plugin/src/models/alertgroup/alertgroup.types.ts
create mode 100644 grafana-plugin/src/models/api_key.ts
create mode 100644 grafana-plugin/src/models/api_token/api_token.ts
create mode 100644 grafana-plugin/src/models/api_token/api_token.types.ts
create mode 100644 grafana-plugin/src/models/base_store.ts
create mode 100644 grafana-plugin/src/models/card.ts
create mode 100644 grafana-plugin/src/models/channel.ts
create mode 100644 grafana-plugin/src/models/channel_filter.ts
create mode 100644 grafana-plugin/src/models/channel_filter/channel_filter.types.ts
create mode 100644 grafana-plugin/src/models/curler/curler.ts
create mode 100644 grafana-plugin/src/models/curler/curler.types.ts
create mode 100644 grafana-plugin/src/models/current_subscription.ts
create mode 100644 grafana-plugin/src/models/current_subscription/current_subscription.ts
create mode 100644 grafana-plugin/src/models/current_subscription/current_subscription.types.ts
create mode 100644 grafana-plugin/src/models/escalation_chain/escalation_chain.ts
create mode 100644 grafana-plugin/src/models/escalation_chain/escalation_chain.types.ts
create mode 100644 grafana-plugin/src/models/escalation_policy.ts
create mode 100644 grafana-plugin/src/models/escalation_policy/escalation_policy.helpers.ts
create mode 100644 grafana-plugin/src/models/escalation_policy/escalation_policy.ts
create mode 100644 grafana-plugin/src/models/escalation_policy/escalation_policy.types.ts
create mode 100644 grafana-plugin/src/models/global_setting/global_setting.ts
create mode 100644 grafana-plugin/src/models/global_setting/global_setting.types.ts
create mode 100644 grafana-plugin/src/models/grafana_team/grafana_team.ts
create mode 100644 grafana-plugin/src/models/grafana_team/grafana_team.types.ts
create mode 100644 grafana-plugin/src/models/heartbeat/heartbeat.ts
create mode 100644 grafana-plugin/src/models/heartbeat/heartbeat.types.ts
create mode 100644 grafana-plugin/src/models/integrations_list.ts
create mode 100644 grafana-plugin/src/models/leader.ts
create mode 100644 grafana-plugin/src/models/maintenance/helpers.ts
create mode 100644 grafana-plugin/src/models/maintenance/maintenance.ts
create mode 100644 grafana-plugin/src/models/maintenance/maintenance.types.ts
create mode 100644 grafana-plugin/src/models/notification_policy.ts
create mode 100644 grafana-plugin/src/models/notify_by.ts
create mode 100644 grafana-plugin/src/models/organization_log/organization_log.ts
create mode 100644 grafana-plugin/src/models/organization_log/organization_log.types.ts
create mode 100644 grafana-plugin/src/models/outgoing_webhook/outgoing_webhook.ts
create mode 100644 grafana-plugin/src/models/outgoing_webhook/outgoing_webhook.types.ts
create mode 100644 grafana-plugin/src/models/resolution_note/resolution_note.ts
create mode 100644 grafana-plugin/src/models/resolution_note/resolution_note.types.ts
create mode 100644 grafana-plugin/src/models/schedule.ts
create mode 100644 grafana-plugin/src/models/schedule/schedule.ts
create mode 100644 grafana-plugin/src/models/schedule/schedule.types.ts
create mode 100644 grafana-plugin/src/models/slack/slack.ts
create mode 100644 grafana-plugin/src/models/slack/slack.types.ts
create mode 100644 grafana-plugin/src/models/slack_channel/slack_channel.config.ts
create mode 100644 grafana-plugin/src/models/slack_channel/slack_channel.helpers.ts
create mode 100644 grafana-plugin/src/models/slack_channel/slack_channel.ts
create mode 100644 grafana-plugin/src/models/slack_channel/slack_channel.types.ts
create mode 100644 grafana-plugin/src/models/team.ts
create mode 100644 grafana-plugin/src/models/team/team.ts
create mode 100644 grafana-plugin/src/models/team/team.types.ts
create mode 100644 grafana-plugin/src/models/telegram_channel/telegram_channel.helpers.ts
create mode 100644 grafana-plugin/src/models/telegram_channel/telegram_channel.ts
create mode 100644 grafana-plugin/src/models/telegram_channel/telegram_channel.types.ts
create mode 100644 grafana-plugin/src/models/user.ts
create mode 100644 grafana-plugin/src/models/user/user.config.ts
create mode 100644 grafana-plugin/src/models/user/user.helpers.tsx
create mode 100644 grafana-plugin/src/models/user/user.ts
create mode 100644 grafana-plugin/src/models/user/user.types.ts
create mode 100644 grafana-plugin/src/models/user_group/user_group.ts
create mode 100644 grafana-plugin/src/models/user_group/user_group.types.ts
create mode 100644 grafana-plugin/src/models/wait_delay.ts
create mode 100644 grafana-plugin/src/models/webinar/webinar.ts
create mode 100644 grafana-plugin/src/models/webinar/webinar.types.ts
create mode 100644 grafana-plugin/src/module.ts
create mode 100644 grafana-plugin/src/network/index.ts
create mode 100644 grafana-plugin/src/pages/chat-ops/ChatOps.module.css
create mode 100644 grafana-plugin/src/pages/chat-ops/ChatOps.tsx
create mode 100644 grafana-plugin/src/pages/chat-ops/ChatOps.types.ts
create mode 100644 grafana-plugin/src/pages/chat-ops/parts/index.tsx
create mode 100644 grafana-plugin/src/pages/chat-ops/parts/tabs/SlackSettings/SlackSettings.module.css
create mode 100644 grafana-plugin/src/pages/chat-ops/parts/tabs/SlackSettings/SlackSettings.tsx
create mode 100644 grafana-plugin/src/pages/chat-ops/parts/tabs/TelegramSettings/TelegramSettings.module.css
create mode 100644 grafana-plugin/src/pages/chat-ops/parts/tabs/TelegramSettings/TelegramSettings.tsx
create mode 100644 grafana-plugin/src/pages/escalation-chains/EscalationChains.module.css
create mode 100644 grafana-plugin/src/pages/escalation-chains/EscalationChains.tsx
create mode 100644 grafana-plugin/src/pages/incident/Incident.helpers.tsx
create mode 100644 grafana-plugin/src/pages/incident/Incident.module.css
create mode 100644 grafana-plugin/src/pages/incident/Incident.tsx
create mode 100644 grafana-plugin/src/pages/incidents/Incidents.module.css
create mode 100644 grafana-plugin/src/pages/incidents/Incidents.tsx
create mode 100644 grafana-plugin/src/pages/incidents/parts/SilenceDropdown.tsx
create mode 100644 grafana-plugin/src/pages/index.ts
create mode 100644 grafana-plugin/src/pages/integrations/Integrations.module.css
create mode 100644 grafana-plugin/src/pages/integrations/Integrations.tsx
create mode 100644 grafana-plugin/src/pages/livesettings/LiveSettings.config.ts
create mode 100644 grafana-plugin/src/pages/livesettings/LiveSettings.helpers.ts
create mode 100644 grafana-plugin/src/pages/livesettings/LiveSettings.module.css
create mode 100644 grafana-plugin/src/pages/livesettings/LiveSettingsPage.tsx
create mode 100644 grafana-plugin/src/pages/maintenance/Maintenance.module.css
create mode 100644 grafana-plugin/src/pages/maintenance/Maintenance.tsx
create mode 100644 grafana-plugin/src/pages/migration-tool/MigrationTool.module.css
create mode 100644 grafana-plugin/src/pages/migration-tool/MigrationTool.tsx
create mode 100644 grafana-plugin/src/pages/migration-tool/img/api-tokens.png
create mode 100644 grafana-plugin/src/pages/organization-logs/OrganizationLog.module.css
create mode 100644 grafana-plugin/src/pages/organization-logs/OrganizationLog.tsx
create mode 100644 grafana-plugin/src/pages/outgoing_webhooks/OutgoingWebhooks.module.css
create mode 100644 grafana-plugin/src/pages/outgoing_webhooks/OutgoingWebhooks.tsx
create mode 100644 grafana-plugin/src/pages/schedules/Schedules.helpers.ts
create mode 100644 grafana-plugin/src/pages/schedules/Schedules.module.css
create mode 100644 grafana-plugin/src/pages/schedules/Schedules.tsx
create mode 100644 grafana-plugin/src/pages/settings/SettingsPage.module.css
create mode 100644 grafana-plugin/src/pages/settings/SettingsPage.tsx
create mode 100644 grafana-plugin/src/pages/test/Test.module.css
create mode 100644 grafana-plugin/src/pages/test/Test.tsx
create mode 100644 grafana-plugin/src/pages/users/Users.helpers.ts
create mode 100644 grafana-plugin/src/pages/users/Users.module.css
create mode 100644 grafana-plugin/src/pages/users/Users.tsx
create mode 100644 grafana-plugin/src/plugin.json
create mode 100644 grafana-plugin/src/services/experimentManager.ts
create mode 100644 grafana-plugin/src/services/googleTagManager.ts
create mode 100644 grafana-plugin/src/services/mixpanel.ts
create mode 100644 grafana-plugin/src/services/urlManager.ts
create mode 100644 grafana-plugin/src/state/features.ts
create mode 100644 grafana-plugin/src/state/helpers.ts
create mode 100644 grafana-plugin/src/state/incidents.ts
create mode 100644 grafana-plugin/src/state/index.ts
create mode 100644 grafana-plugin/src/state/plugin.ts
create mode 100644 grafana-plugin/src/state/rootBaseStore.ts
create mode 100644 grafana-plugin/src/state/types.ts
create mode 100644 grafana-plugin/src/state/useStore.ts
create mode 100644 grafana-plugin/src/state/userAction.ts
create mode 100644 grafana-plugin/src/state/withStore.tsx
create mode 100644 grafana-plugin/src/types.ts
create mode 100644 grafana-plugin/src/utils/consts.ts
create mode 100644 grafana-plugin/src/utils/datetime.ts
create mode 100644 grafana-plugin/src/utils/hooks.ts
create mode 100644 grafana-plugin/src/utils/index.ts
create mode 100644 grafana-plugin/src/utils/loadCss.ts
create mode 100644 grafana-plugin/src/utils/localStorage.ts
create mode 100644 grafana-plugin/src/utils/sanitize.ts
create mode 100644 grafana-plugin/src/utils/url.ts
create mode 100644 grafana-plugin/src/vars.css
create mode 100644 grafana-plugin/tools/eslint-rules/no-relative-import-paths.js
create mode 100644 grafana-plugin/tools/plop/generators/appendReadmeFile.js
create mode 100644 grafana-plugin/tools/plop/generators/createComponentFiles.js
create mode 100644 grafana-plugin/tools/plop/generators/createContainerFiles.js
create mode 100644 grafana-plugin/tools/plop/generators/createModelFiles.js
create mode 100644 grafana-plugin/tools/plop/helpers/configNeeded.js
create mode 100644 grafana-plugin/tools/plop/prompts/componentPrompts.js
create mode 100644 grafana-plugin/tools/plop/prompts/containerPrompts.js
create mode 100644 grafana-plugin/tools/plop/prompts/modelPrompts.js
create mode 100644 grafana-plugin/tools/plop/prompts/readmePrompts.js
create mode 100644 grafana-plugin/tools/plop/templates/BuildInfo.md.hbs
create mode 100644 grafana-plugin/tools/plop/templates/Component/ClassComponent.tsx.hbs
create mode 100644 grafana-plugin/tools/plop/templates/Component/Component.module.css.hbs
create mode 100644 grafana-plugin/tools/plop/templates/Component/FunctionalComponent.tsx.hbs
create mode 100644 grafana-plugin/tools/plop/templates/Container/ClassComponent.tsx.hbs
create mode 100644 grafana-plugin/tools/plop/templates/Container/Component.module.css.hbs
create mode 100644 grafana-plugin/tools/plop/templates/Container/FunctionalComponent.tsx.hbs
create mode 100644 grafana-plugin/tools/plop/templates/Model/BaseModel.ts.hbs
create mode 100644 grafana-plugin/tools/plop/templates/Model/BaseModel.types.ts.hbs
create mode 100644 grafana-plugin/tsconfig.json
create mode 100644 grafana-plugin/webpack.config.js
create mode 100644 grafana-plugin/yarn.lock
create mode 100644 screenshot.png
create mode 100644 tools/image-tag.sh
diff --git a/.dockerignore b/.dockerignore
new file mode 100644
index 0000000000..e541b3d4f1
--- /dev/null
+++ b/.dockerignore
@@ -0,0 +1,8 @@
+venv/*
+venv2.7/*
+.DS_Store
+frontend/node_modules
+frontend/build
+package-lock.json
+./engine/extensions
+.env
\ No newline at end of file
diff --git a/.drone.yml b/.drone.yml
new file mode 100644
index 0000000000..1f37c6c9c4
--- /dev/null
+++ b/.drone.yml
@@ -0,0 +1,230 @@
+kind: pipeline
+type: docker
+name: Build and Release
+
+steps:
+ - name: Build Plugin
+ image: node:14.6.0-stretch
+ commands:
+ - apt-get update
+ - apt-get --assume-yes install jq
+ - cd grafana-plugin/
+ - if [ -z "$DRONE_TAG" ]; then echo "No tag, not modifying version"; else jq '.version="${DRONE_TAG}"' package.json > package.new && mv package.new package.json && jq '.version' package.json; fi
+ - yarn --network-timeout 500000
+ - yarn plop "Append build info" "${DRONE_TAG}" "${DRONE_BRANCH}" "${DRONE_COMMIT}"
+ - yarn build
+ - ls ./
+
+ - name: Sign and Package Plugin
+ image: node:14.6.0-stretch
+ environment:
+ GRAFANA_API_KEY:
+ from_secret: gcom_plugin_publisher_api_key
+ depends_on:
+ - Build Plugin
+ commands:
+ - apt-get update
+ - apt-get install zip
+ - cd grafana-plugin
+ - yarn sign
+ - yarn ci-build:finish
+ - yarn ci-package
+ - cd ci/dist
+ - zip -r grafana-oncall-app-${DRONE_BRANCH}-${DRONE_BUILD_NUMBER}.zip ./grafana-oncall-app
+ - if [ -z "$DRONE_TAG" ]; then echo "No tag, skipping archive"; else cp grafana-oncall-app-${DRONE_BRANCH}-${DRONE_BUILD_NUMBER}.zip grafana-oncall-app-${DRONE_TAG}.zip; fi
+
+ - name: Publish Plugin to GCS (release)
+ image: plugins/gcs
+ settings:
+ acl: allUsers:READER
+ source: grafana-plugin/ci/dist/grafana-oncall-app-${DRONE_TAG}.zip
+ target: grafana-oncall-app/releases/grafana-oncall-app-${DRONE_TAG}.zip
+ token:
+ from_secret: gcs_oncall_publisher_key
+ depends_on:
+ - Sign and Package Plugin
+ when:
+ ref:
+ - refs/tags/v*.*.*
+
+ - name: Publish Plugin to Github (release)
+ image: plugins/github-release
+ settings:
+ api_key:
+ from_secret: gh_token
+ files: grafana-plugin/ci/dist/grafana-oncall-app-${DRONE_TAG}.zip
+ title: ${DRONE_TAG}
+ depends_on:
+ - Sign and Package Plugin
+ when:
+ ref:
+ - refs/tags/v*.*.*
+
+ - name: Publish Plugin to grafana.com (release)
+ image: curlimages/curl:7.73.0
+ environment:
+ GRAFANA_API_KEY:
+ from_secret: gcom_plugin_publisher_api_key
+ commands:
+ - "curl -f -s -H \"Authorization: Bearer $${GRAFANA_API_KEY}\" -d \"download[any][url]=https://storage.googleapis.com/grafana-oncall-app/releases/grafana-oncall-app-${DRONE_TAG}.zip\" -d \"download[any][md5]=$$(curl -sL https://storage.googleapis.com/grafana-oncall-app/releases/grafana-oncall-app-${DRONE_TAG}.zip | md5sum | cut -d' ' -f1)\" -d url=https://github.com/grafana/oncall/grafana-plugin https://grafana.com/api/plugins"
+ depends_on:
+ - Publish Plugin to GCS (release)
+ - Publish Plugin to Github (release)
+ when:
+ ref:
+ - refs/tags/v*.*.*
+
+ - name: Lint Backend
+ image: python:3.9
+ environment:
+ DJANGO_SETTINGS_MODULE: settings.ci-test
+ commands:
+ - pip install $(grep "pre-commit" engine/requirements.txt)
+ - pre-commit run isort --all-files
+ - pre-commit run black --all-files
+ - pre-commit run flake8 --all-files
+
+ - name: Test Backend
+ image: python:3.9
+ environment:
+ DJANGO_SETTINGS_MODULE: settings.ci-test
+ SLACK_CLIENT_OAUTH_ID: 1
+ commands:
+ - apt-get update && apt-get install -y netcat
+ - cd engine/
+ - mkdir sqlite_data
+ - pip install -r requirements.txt
+ - pytest --ds=settings.ci-test
+ - rm -rf sqlite_data
+ depends_on:
+ - rabbit_test
+
+ - name: Image Tag
+ image: alpine
+ commands:
+ - apk add --no-cache bash git sed
+ - git fetch origin --tags
+ - chmod +x ./tools/image-tag.sh
+ - echo $(./tools/image-tag.sh)
+ - echo $(./tools/image-tag.sh) > .tags
+ - if [ -z "$DRONE_TAG" ]; then echo "No tag, not modifying version"; else sed "0,/VERSION.*/ s/VERSION.*/VERSION = \"${DRONE_TAG}\"/g" engine/settings/base.py > engine/settings/base.temp && mv engine/settings/base.temp engine/settings/base.py; fi
+ - cat engine/settings/base.py | grep VERSION | head -1
+ when:
+ ref:
+ - refs/heads/dev
+ - refs/tags/v*.*.*
+
+ - name: Build and Push Engine Docker Image Backend to GCR
+ image: plugins/docker
+ settings:
+ repo: us.gcr.io/kubernetes-dev/oncall-engine
+ dockerfile: engine/Dockerfile
+ context: engine/
+ config:
+ from_secret: gcr_admin
+ depends_on:
+ - Lint Backend
+ - Test Backend
+ - Image Tag
+
+ - name: Build and Push Engine Docker Image Backend to Dockerhub
+ image: plugins/docker
+ settings:
+ repo: grafana/oncall
+ dockerfile: engine/Dockerfile
+ context: engine/
+ password:
+ from_secret: docker_password
+ username:
+ from_secret: docker_username
+ depends_on:
+ - Lint Backend
+ - Test Backend
+ - Image Tag
+ when:
+ ref:
+ - refs/heads/dev
+ - refs/tags/v*.*.*
+
+# Services for Test Backend
+services:
+ - name: rabbit_test
+ image: rabbitmq:3.7.19
+ environment:
+ RABBITMQ_DEFAULT_USER: rabbitmq
+ RABBITMQ_DEFAULT_PASS: rabbitmq
+
+trigger:
+ event:
+ - push
+ - tag
+
+---
+# Secret for pulling docker images.
+kind: secret
+name: dockerconfigjson
+get:
+ path: secret/data/common/gcr
+ name: .dockerconfigjson
+
+---
+# Secret for pushing docker images.
+kind: secret
+name: gcr_admin
+get:
+ path: infra/data/ci/gcr-admin
+ name: .dockerconfigjson
+
+---
+# Secret for GitHub
+get:
+ name: pat
+ path: infra/data/ci/github/grafanabot
+kind: secret
+name: gh_token
+
+---
+# Slack webhook
+get:
+ name: slack-plugin
+ path: secret/data/common/oncall/drone
+kind: secret
+name: slack_webhook
+
+---
+# GCOM plugin publisher
+get:
+ name: gcom-plugin-publisher
+ path: secret/data/common/oncall/drone
+kind: secret
+name: gcom_plugin_publisher_api_key
+
+---
+# GCS bucket
+get:
+ name: credentials.json
+ path: secret/data/common/oncall/gcs-oncall-drone-publisher
+kind: secret
+name: gcs_oncall_publisher_key
+
+---
+# Dockerhub
+get:
+ name: username
+ path: infra/data/ci/docker_hub
+kind: secret
+name: docker_username
+---
+get:
+ name: password
+ path: infra/data/ci/docker_hub
+kind: secret
+name: docker_password
+
+---
+# Drone
+get:
+ name: machine-user-token
+ path: infra/data/ci/drone
+kind: secret
+name: drone_token
\ No newline at end of file
diff --git a/.env.example b/.env.example
new file mode 100644
index 0000000000..6be42e97bd
--- /dev/null
+++ b/.env.example
@@ -0,0 +1,31 @@
+SLACK_CLIENT_OAUTH_ID=
+SLACK_CLIENT_OAUTH_SECRET=
+SLACK_API_TOKEN=
+SLACK_API_TOKEN_COMMON=
+SLACK_SLASH_COMMAND_NAME=/oncall
+
+TELEGRAM_WEBHOOK_URL=
+TELEGRAM_TOKEN=
+
+TWILIO_ACCOUNT_SID=
+TWILIO_VERIFY_SERVICE_SID=
+TWILIO_AUTH_TOKEN=
+TWILIO_NUMBER=
+
+SENDGRID_SECRET_KEY=
+SENDGRID_INBOUND_EMAIL_DOMAIN=
+SENDGRID_API_KEY=
+SENDGRID_FROM_EMAIL=
+
+DJANGO_SETTINGS_MODULE=settings.dev
+SECRET_KEY=jkashdkjashdkjh
+BASE_URL=http://localhost:8000
+
+FEATURE_TELEGRAM_INTEGRATION_ENABLED=
+FEATURE_SLACK_INTEGRATION_ENABLED=True
+FEATURE_EXTRA_MESSAGING_BACKENDS_ENABLED=
+
+SLACK_INSTALL_RETURN_REDIRECT_HOST=http://localhost:8000
+SOCIAL_AUTH_REDIRECT_IS_HTTPS=False
+
+GRAFANA_INCIDENT_STATIC_API_KEY=
diff --git a/.github/issue_and_pr_commands.json b/.github/issue_and_pr_commands.json
new file mode 100644
index 0000000000..a1f4ec2178
--- /dev/null
+++ b/.github/issue_and_pr_commands.json
@@ -0,0 +1,10 @@
+[
+ {
+ "type": "label",
+ "name": "type/docs",
+ "action": "addToProject",
+ "addToProject": {
+ "url": "https://github.com/orgs/grafana/projects/69"
+ }
+ }
+]
diff --git a/.github/workflows/backend-ci.yml b/.github/workflows/backend-ci.yml
new file mode 100644
index 0000000000..b6095f99aa
--- /dev/null
+++ b/.github/workflows/backend-ci.yml
@@ -0,0 +1,39 @@
+name: backend-ci
+
+on:
+ push:
+ branches:
+ - main
+ pull_request:
+
+jobs:
+ lint:
+ name: Backend Checks
+ runs-on: ubuntu-latest
+ container: python:3.9
+ env:
+ DJANGO_SETTINGS_MODULE: settings.ci-test
+ SLACK_CLIENT_OAUTH_ID: 1
+ services:
+ rabbit_test:
+ image: rabbitmq:3.7.19
+ env:
+ RABBITMQ_DEFAULT_USER: rabbitmq
+ RABBITMQ_DEFAULT_PASS: rabbitmq
+
+ steps:
+ - uses: actions/checkout@v2
+ - name: Lint Backend
+ run: |
+ pip install $(grep "pre-commit" engine/requirements.txt)
+ pre-commit run isort --all-files
+ pre-commit run black --all-files
+ pre-commit run flake8 --all-files
+
+ - name: Test Backend
+ run: |
+ apt-get update && apt-get install -y netcat
+ cd engine/
+ mkdir sqlite_data
+ pip install -r requirements.txt
+ pytest --ds=settings.ci-test
\ No newline at end of file
diff --git a/.github/workflows/frontend-ci.yml b/.github/workflows/frontend-ci.yml
new file mode 100644
index 0000000000..b76cf24180
--- /dev/null
+++ b/.github/workflows/frontend-ci.yml
@@ -0,0 +1,21 @@
+name: frontend-ci
+
+on:
+ push:
+ branches:
+ - main
+ pull_request:
+
+jobs:
+ lint:
+ name: Frontend Checks
+ runs-on: ubuntu-latest
+ env:
+ DJANGO_SETTINGS_MODULE: settings.ci-test
+ steps:
+ - uses: actions/checkout@v2
+ - name: Build Frontend Plugin
+ run: |
+ cd grafana-plugin/
+ yarn --network-timeout 500000
+ yarn build
diff --git a/.github/workflows/issue_commands.yml b/.github/workflows/issue_commands.yml
new file mode 100644
index 0000000000..2279e1d0c2
--- /dev/null
+++ b/.github/workflows/issue_commands.yml
@@ -0,0 +1,23 @@
+name: Run commands when issues are labeled
+on:
+ issues:
+ types: [labeled]
+ pull_request:
+ types: [labeled]
+jobs:
+ main:
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout Actions
+ uses: actions/checkout@v2
+ with:
+ repository: "grafana/grafana-github-actions"
+ path: ./actions
+ ref: main
+ - name: Install Actions
+ run: npm install --production --prefix ./actions
+ - name: Run Commands
+ uses: ./actions/commands
+ with:
+ token: ${{secrets.GH_BOT_ACCESS_TOKEN}}
+ configPath: issue_and_pr_commands
\ No newline at end of file
diff --git a/.github/workflows/publish_docs.yml b/.github/workflows/publish_docs.yml
new file mode 100644
index 0000000000..2018956166
--- /dev/null
+++ b/.github/workflows/publish_docs.yml
@@ -0,0 +1,40 @@
+name: publish_docs
+
+on:
+ pull_request:
+ push:
+ branches:
+ - main
+ paths:
+ - 'docs/sources/**'
+
+jobs:
+ test:
+ runs-on: ubuntu-latest
+ steps:
+ - name: Check out code
+ uses: actions/checkout@v1
+ - name: Build Website
+ run: |
+ docker run -v ${PWD}/sources:/hugo/content/docs/amixr --rm grafana/docs-base:latest /bin/bash -c 'make hugo'
+ sync:
+ runs-on: ubuntu-latest
+ needs: test
+ if: github.ref == 'refs/heads/main'
+ steps:
+ - uses: actions/checkout@v1
+ - run: git clone --single-branch --no-tags --depth 1 -b master https://grafanabot:${{ secrets.GH_BOT_ACCESS_TOKEN }}@github.com/grafana/website-sync ./.github/actions/website-sync
+ - name: publish-to-git
+ uses: ./.github/actions/website-sync
+ id: publish
+ with:
+ repository: grafana/website
+ branch: master
+ host: github.com
+ github_pat: '${{ secrets.GH_BOT_ACCESS_TOKEN }}'
+ source_folder: docs/sources
+ target_folder: content/docs/amixr/v0.0.39
+ - shell: bash
+ run: |
+ test -n "${{ steps.publish.outputs.commit_hash }}"
+ test -n "${{ steps.publish.outputs.working_directory }}"
\ No newline at end of file
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000000..ae81aab53d
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,47 @@
+# Backend
+*/db.sqlite3
+*.pyc
+venv
+.env
+.vscode
+dump.rdb
+.idea
+engine/celerybeat-schedule.db
+engine/sqlite_data
+jupiter_playbooks/*
+engine/reports/*.csv
+engine/jupiter_playbooks/*
+
+# Frontend dependencies
+node_modules
+/.pnp
+.pnp.js
+
+# testing
+/coverage
+
+# production
+/build
+
+# misc
+.DS_Store
+.env.local
+.env.development.local
+.env.test.local
+.env.production.local
+.swp
+
+npm-debug.log*
+yarn-debug.log*
+yarn-error.log*
+
+scripts/gcom.token
+scripts/gcom_grafana.token
+scripts/gcom_raintank.token
+
+engine/extensions/
+
+grafana-plugin/frontend_enterprise
+
+uwsgi-local.ini
+celerybeat-schedule
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
new file mode 100644
index 0000000000..e2dbf2defd
--- /dev/null
+++ b/.pre-commit-config.yaml
@@ -0,0 +1,46 @@
+repos:
+ - repo: https://github.com/pycqa/isort
+ rev: 5.9.3
+ hooks:
+ - id: isort
+ files: ^engine
+ args: [--settings-file=engine/pyproject.toml, --filter-files]
+
+ - repo: https://github.com/psf/black
+ rev: 22.3.0
+ hooks:
+ - id: black
+ files: ^engine
+ args: [--config=engine/pyproject.toml]
+
+ - repo: https://github.com/pycqa/flake8
+ rev: 3.9.2
+ hooks:
+ - id: flake8
+ files: ^engine
+ args: [--config=engine/tox.ini]
+ additional_dependencies:
+ - flake8-tidy-imports
+
+ - repo: https://github.com/pre-commit/mirrors-eslint
+ rev: v7.21.0
+ hooks:
+ - id: eslint
+ entry: bash -c 'cd grafana-plugin && eslint --fix ${@/grafana-plugin\//}' --
+ types: [file]
+ files: ^grafana-plugin/src/.*\.(js|jsx|ts|tsx)$
+ additional_dependencies:
+ - eslint@7.21.0
+ - eslint-plugin-import@^2.25.4
+ - eslint-plugin-rulesdir@^0.2.1
+
+ - repo: https://github.com/thibaudcolas/pre-commit-stylelint
+ rev: v13.13.1
+ hooks:
+ - id: stylelint
+ entry: bash -c 'cd grafana-plugin && stylelint --fix ${@/grafana-plugin\//}' --
+ types: [file]
+ files: ^grafana-plugin/src/.*\.css$
+ additional_dependencies:
+ - stylelint@^13.13.1
+ - stylelint-config-standard@^22.0.0
diff --git a/CHANGELOG.md b/CHANGELOG.md
new file mode 100644
index 0000000000..940ce36a15
--- /dev/null
+++ b/CHANGELOG.md
@@ -0,0 +1,3 @@
+# Change Log
+
+## 1.0.0 (2022-02-02)
diff --git a/DEVELOPER.md b/DEVELOPER.md
new file mode 100644
index 0000000000..fd4da888d5
--- /dev/null
+++ b/DEVELOPER.md
@@ -0,0 +1,385 @@
+* [Developer quickstart](#developer-quickstart)
+ * [Backend setup](#backend-setup)
+ * [Frontend setup](#frontend-setup)
+ * [Slack application setup](#slack-application-setup)
+* [Troubleshooting](#troubleshooting)
+ * [ld: library not found for -lssl](#ld-library-not-found-for--lssl)
+ * [Could not build wheels for cryptography which use PEP 517 and cannot be installed directly](#could-not-build-wheels-for-cryptography-which-use-pep-517-and-cannot-be-installed-directly)
+ * [django.db.utils.OperationalError: (1366, "Incorrect string value ...")](#djangodbutilsoperationalerror-1366-incorrect-string-value-)
+ * [Empty queryset when filtering against datetime field](#empty-queryset-when-filtering-against-datetime-field)
+* [Hints](#hints)
+ * [Building the all-in-one docker container](#building-the-all-in-one-docker-container)
+ * [Running Grafana with plugin (frontend) folder mounted for dev purposes](#running-grafana-with-plugin-frontend-folder-mounted-for-dev-purposes)
+ * [How to recreate the local database](#recreating-the-local-database)
+ * [Running tests locally](#running-tests-locally)
+* [IDE Specific Instructions](#ide-specific-instructions)
+ * [PyCharm](#pycharm)
+
+## Developer quickstart
+
+### Code style
+
+- [isort](https://github.com/PyCQA/isort), [black](https://github.com/psf/black) and [flake8](https://github.com/PyCQA/flake8) are used to format backend code
+- [eslint](https://eslint.org) and [stylelint](https://stylelint.io) are used to format frontend code
+- To run formatters and linters on all files: `pre-commit run --all-files`
+- To install pre-commit hooks: `pre-commit install`
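+
+For example, to run a single hook against specific files (a minimal sketch; hook ids such as `flake8` come from the `.pre-commit-config.yaml` added earlier in this patch):
+```bash
+# Install the git hooks once
+pre-commit install
+
+# Run one hook on explicit files instead of the whole tree
+pre-commit run flake8 --files engine/manage.py
+```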
+
+### Backend setup
+
+1. Start stateful services (RabbitMQ, Redis, Grafana with mounted plugin folder)
+```bash
+docker-compose -f developer-docker-compose.yml up -d
+```
+
+2. Prepare a python environment:
+```bash
+# Create and activate the virtual environment
+python3.9 -m venv venv && source venv/bin/activate
+
+# Verify that python has version 3.9.x
+python --version
+
+# Make sure you have latest pip and wheel support
+pip install -U pip wheel
+
+# Copy and check .env file.
+cp .env.example .env
+
+# Apply .env to current terminal.
+# For PyCharm it's better to use https://plugins.jetbrains.com/plugin/7861-envfile/
+export $(grep -v '^#' .env | xargs -0)
+
+# Install dependencies.
+# Hint: there is a known issue with uwsgi. It's not used in the local dev environment, so feel free to comment it out in `engine/requirements.txt`.
+cd engine && pip install -r requirements.txt
+
+# Create folder for database
+mkdir sqlite_data
+
+# Migrate the DB:
+python manage.py migrate
+
+# Create user for django admin panel:
+python manage.py createsuperuser
+```
+
+
+3. Launch the backend:
+```bash
+# Http server:
+python manage.py runserver
+
+# Worker for background tasks (run it in a parallel terminal; don't forget to export .env there)
+python manage.py start_celery
+
+# Optionally, you can also launch the periodic task scheduler (99% of the time you won't need this)
+celery -A engine beat -l info
+```
+
+4. All set! Check out internal API endpoints at http://localhost:8000/.
+
+
+### Frontend setup
+
+1. Make sure you have [Node.js](https://nodejs.org/) v14 or later (but below v17) and [yarn](https://yarnpkg.com/) installed.
+
+2. Install the dependencies with `yarn` and launch the frontend server (on port `3000` by default)
+```bash
+cd grafana-plugin
+yarn install
+yarn watch
+```
+
+3. Ensure there is no `grafana-plugin.yml` file in `/grafana-plugin/provisioning`
+
+4. Generate an invitation token:
+```bash
+cd engine;
+python manage.py issue_invite_for_the_frontend --override
+```
+... or use the output of the all-in-one docker container described in the README.md.
+
+5. Open Grafana in the browser at http://localhost:3000 (login: oncall, password: oncall). Notice that the OnCall plugin is not enabled yet; navigate to Configuration -> Plugins and click Grafana OnCall
+
+6. Some configuration fields will become available. Fill them out and click Initialize OnCall
+```
+OnCall API URL:
+http://host.docker.internal:8000
+
+OnCall Invitation Token (Single use token to connect Grafana instance):
+Response from the invite generator command (check above)
+
+Grafana URL (URL OnCall will use to talk to Grafana instance):
+http://localhost:3000
+```
+
+NOTE: you may not have `host.docker.internal` available; in that case, you can get the
+host IP from inside the container by running:
+```bash
+/sbin/ip route|awk '/default/ { print $3 }'
+
+# Alternatively add host.docker.internal as an extra_host for grafana in developer-docker-compose.yml
+extra_hosts:
+ - "host.docker.internal:host-gateway"
+
+```
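+
+For example, if the command above prints `172.17.0.1` (a common Docker bridge gateway address; yours may differ), use it in place of `host.docker.internal`:
+```
+OnCall API URL:
+http://172.17.0.1:8000
+```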
+
+### Slack application setup
+
+These instructions also apply if you are setting up self-hosted OnCall.
+
+1. Start a [localtunnel](https://github.com/localtunnel/localtunnel) reverse proxy to make the OnCall engine API accessible to Slack (if your OnCall backend is not reachable over HTTPS):
+```bash
+# Choose a unique prefix instead of pretty-turkey-83.
+# Localtunnel will generate a URL, e.g. https://pretty-turkey-83.loca.lt;
+# it is referred to below.
+lt --port 8000 -s pretty-turkey-83 --print-requests
+```
+
+2. [Create a Slack Workspace](https://slack.com/create) for development.
+
+3. Go to https://api.slack.com/apps and click the Create New App button
+
+4. Select the `From an app manifest` option and choose the right workspace
+
+5. Copy and paste the following manifest, filling in the `name` and `display_name` fields:
+
+
+
+ ```yaml
+ _metadata:
+ major_version: 1
+ minor_version: 1
+ display_information:
+ name:
+ features:
+ app_home:
+ home_tab_enabled: true
+ messages_tab_enabled: true
+ messages_tab_read_only_enabled: false
+ bot_user:
+ display_name:
+ always_online: true
+ shortcuts:
+ - name: Create a new incident
+ type: message
+ callback_id: incident_create
+ description: Creates a new OnCall incident
+ - name: Add to postmortem
+ type: message
+ callback_id: add_postmortem
+ description: Add this message to postmortem
+ slash_commands:
+ - command: /oncall
+ url: /slack/interactive_api_endpoint/
+ description: oncall
+ should_escape: false
+ oauth_config:
+ redirect_urls:
+ - /api/internal/v1/complete/slack-install-free/
+ - /api/internal/v1/complete/slack-login/
+ scopes:
+ user:
+ - channels:read
+ - chat:write
+ - identify
+ - users.profile:read
+ bot:
+ - app_mentions:read
+ - channels:history
+ - channels:read
+ - chat:write
+ - chat:write.customize
+ - chat:write.public
+ - commands
+ - files:write
+ - groups:history
+ - groups:read
+ - im:history
+ - im:read
+ - im:write
+ - mpim:history
+ - mpim:read
+ - mpim:write
+ - reactions:write
+ - team:read
+ - usergroups:read
+ - usergroups:write
+ - users.profile:read
+ - users:read
+ - users:read.email
+ - users:write
+ settings:
+ event_subscriptions:
+ request_url: /slack/event_api_endpoint/
+ bot_events:
+ - app_home_opened
+ - app_mention
+ - channel_archive
+ - channel_created
+ - channel_deleted
+ - channel_rename
+ - channel_unarchive
+ - member_joined_channel
+ - message.channels
+ - message.im
+ - subteam_created
+ - subteam_members_changed
+ - subteam_updated
+ - user_change
+ interactivity:
+ is_enabled: true
+ request_url: /slack/interactive_api_endpoint/
+ org_deploy_enabled: false
+ socket_mode_enabled: false
+ ```
+
+
+6. Click the `Install to workspace` button to generate the credentials
+
+7. Populate the environment with variables related to Slack
+
+ In your `.env` file, fill out the following variables:
+
+ ```
+ SLACK_CLIENT_OAUTH_ID = Basic Information -> App Credentials -> Client ID
+ SLACK_CLIENT_OAUTH_SECRET = Basic Information -> App Credentials -> Client Secret
+ SLACK_API_TOKEN = OAuth & Permissions -> Bot User OAuth Token
+ SLACK_INSTALL_RETURN_REDIRECT_HOST = https://pretty-turkey-83.loca.lt
+ ```
+
+ Don't forget to export variables from the `.env` file and restart the server!
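+
+    For reference, re-using the commands from the backend setup above:
+
+    ```bash
+    # From the directory containing your .env file
+    export $(grep -v '^#' .env | xargs -0)
+
+    # Then restart the HTTP server from the engine/ directory
+    python manage.py runserver
+    ```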
+
+8. Edit `grafana-plugin/grafana-plugin.yml` to set the `onCallApiUrl` field to the localtunnel URL:
+ ```
+ onCallApiUrl: https://pretty-turkey-83.loca.lt
+ ```
+
+   or set the BASE_URL environment variable through the web interface.
+
+9. Edit `grafana-plugin/src/plugin.json` to add a `Bypass-Tunnel-Reminder` header to all existing routes
+   > This header is required for local development only; without it, localtunnel blocks requests from the Grafana plugin.
+
+ ```
+ {
+ "path": ...,
+ ...
+ "headers": [
+ ...
+ {
+ "name": "Bypass-Tunnel-Reminder",
+ "content": "True"
+ }
+ ]
+ },
+ ```
+10. Rebuild the plugin
+ ```
+ yarn watch
+ ```
+11. Restart the Grafana instance
+
+12. All set! Go to Slack and check that your application is functional.
+
+## Troubleshooting
+
+### ld: library not found for -lssl
+
+**Problem:**
+```
+pip install -r requirements.txt
+...
+ ld: library not found for -lssl
+ clang: error: linker command failed with exit code 1 (use -v to see invocation)
+ error: command 'gcc' failed with exit status 1
+...
+```
+**Solution:**
+
+```
+export LDFLAGS=-L/usr/local/opt/openssl/lib
+pip install -r requirements.txt
+```
+
+### Could not build wheels for cryptography which use PEP 517 and cannot be installed directly
+
+This typically happens on Apple Silicon.
+
+**Problem:**
+```
+ build/temp.macosx-12-arm64-3.9/_openssl.c:575:10: fatal error: 'openssl/opensslv.h' file not found
+  #include <openssl/opensslv.h>
+ ^~~~~~~~~~~~~~~~~~~~
+ 1 error generated.
+ error: command '/usr/bin/clang' failed with exit code 1
+ ----------------------------------------
+ ERROR: Failed building wheel for cryptography
+```
+**Solution:**
+```
+LDFLAGS="-L$(brew --prefix openssl@1.1)/lib" CFLAGS="-I$(brew --prefix openssl@1.1)/include" pip install `cat requirements.txt | grep cryptography`
+```
+
+### django.db.utils.OperationalError: (1366, "Incorrect string value ...")
+
+**Problem:**
+```
+django.db.utils.OperationalError: (1366, "Incorrect string value: '\\xF0\\x9F\\x98\\x8A\\xF0\\x9F...' for column 'cached_name' at row 1")
+```
+
+**Solution:**
+
+Recreate the database with the correct encoding.
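+
+A minimal sketch for MySQL, assuming a database named `oncall_local` (substitute your own database name and credentials):
+
+```bash
+mysql -u root -p -e "DROP DATABASE oncall_local; CREATE DATABASE oncall_local CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci;"
+
+# Then re-apply migrations from the engine/ directory
+python manage.py migrate
+```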
+
+### Grafana OnCall plugin does not show up in the plugin list
+
+**Problem:**
+I've run `yarn watch` in `grafana-plugin` but I do not see Grafana OnCall in the list of plugins
+
+**Solution:**
+If this is the first time you have run `yarn watch`, and you ran it after starting Grafana in docker-compose, Grafana will not have detected the plugin. To fix this, restart Grafana: `docker-compose -f developer-docker-compose.yml restart grafana`
+
+## Hints
+
+### Building the all-in-one docker container
+
+```bash
+cd engine;
+docker build -t grafana/oncall-all-in-one -f Dockerfile.all-in-one .
+```
+
+### Running Grafana with plugin (frontend) folder mounted for dev purposes
+
+Do this only after you have built the frontend at least once. Note that developer-docker-compose.yml already includes a similar Grafana service.
+```bash
+docker run --rm -it -p 3000:3000 -v "$(pwd)"/grafana-plugin:/var/lib/grafana/plugins/grafana-plugin -e GF_PLUGINS_ALLOW_LOADING_UNSIGNED_PLUGINS=grafana-oncall-app --name=grafana grafana/grafana:8.3.2
+```
+Credentials: admin/admin
+
+### Running tests locally
+
+```
+# in the engine directory, with the virtualenv activated
+pytest --ds=settings.dev
+```
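+
+For example, to run only a subset of tests (paths and keywords are illustrative):
+
+```
+# Only tests under apps/alerts
+pytest --ds=settings.dev apps/alerts
+
+# Only tests whose names match a keyword
+pytest --ds=settings.dev -k heartbeat
+```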
+
+## IDE Specific Instructions
+
+### PyCharm
+1. Create venv and copy .env file
+ ```bash
+ python3.9 -m venv venv
+ cp .env.example .env
+ ```
+2. Open the project in PyCharm
+3. Settings → Project OnCall
+ - In Python Interpreter click the gear and create a new Virtualenv from existing environment selecting the venv created in Step 1.
+ - In Project Structure make sure the project root is the content root and add /engine to Sources
+4. Under Settings → Languages & Frameworks → Django
+ - Enable Django support
+ - Set Django project root to /engine
+ - Set Settings to settings/dev.py
+5. Create a new Django Server run configuration to Run/Debug the engine
+ - Use a plugin such as EnvFile to load the .env file
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000000..be3f7b28e5
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,661 @@
+ GNU AFFERO GENERAL PUBLIC LICENSE
+ Version 3, 19 November 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc.
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The GNU Affero General Public License is a free, copyleft license for
+software and other kinds of works, specifically designed to ensure
+cooperation with the community in the case of network server software.
+
+ The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works. By contrast,
+our General Public Licenses are intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+ Developers that use our General Public Licenses protect your rights
+with two steps: (1) assert copyright on the software, and (2) offer
+you this License which gives you legal permission to copy, distribute
+and/or modify the software.
+
+ A secondary benefit of defending all users' freedom is that
+improvements made in alternate versions of the program, if they
+receive widespread use, become available for other developers to
+incorporate. Many developers of free software are heartened and
+encouraged by the resulting cooperation. However, in the case of
+software used on network servers, this result may fail to come about.
+The GNU General Public License permits making a modified version and
+letting the public access it on a server without ever releasing its
+source code to the public.
+
+ The GNU Affero General Public License is designed specifically to
+ensure that, in such cases, the modified source code becomes available
+to the community. It requires the operator of a network server to
+provide the source code of the modified version running there to the
+users of that server. Therefore, public use of a modified version, on
+a publicly accessible server, gives the public access to the source
+code of the modified version.
+
+ An older license, called the Affero General Public License and
+published by Affero, was designed to accomplish similar goals. This is
+a different license, not a version of the Affero GPL, but Affero has
+released a new version of the Affero GPL which permits relicensing under
+this license.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ TERMS AND CONDITIONS
+
+ 0. Definitions.
+
+ "This License" refers to version 3 of the GNU Affero General Public License.
+
+ "Copyright" also means copyright-like laws that apply to other kinds of
+works, such as semiconductor masks.
+
+ "The Program" refers to any copyrightable work licensed under this
+License. Each licensee is addressed as "you". "Licensees" and
+"recipients" may be individuals or organizations.
+
+ To "modify" a work means to copy from or adapt all or part of the work
+in a fashion requiring copyright permission, other than the making of an
+exact copy. The resulting work is called a "modified version" of the
+earlier work or a work "based on" the earlier work.
+
+ A "covered work" means either the unmodified Program or a work based
+on the Program.
+
+ To "propagate" a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy. Propagation includes copying,
+distribution (with or without modification), making available to the
+public, and in some countries other activities as well.
+
+ To "convey" a work means any kind of propagation that enables other
+parties to make or receive copies. Mere interaction with a user through
+a computer network, with no transfer of a copy, is not conveying.
+
+ An interactive user interface displays "Appropriate Legal Notices"
+to the extent that it includes a convenient and prominently visible
+feature that (1) displays an appropriate copyright notice, and (2)
+tells the user that there is no warranty for the work (except to the
+extent that warranties are provided), that licensees may convey the
+work under this License, and how to view a copy of this License. If
+the interface presents a list of user commands or options, such as a
+menu, a prominent item in the list meets this criterion.
+
+ 1. Source Code.
+
+ The "source code" for a work means the preferred form of the work
+for making modifications to it. "Object code" means any non-source
+form of a work.
+
+ A "Standard Interface" means an interface that either is an official
+standard defined by a recognized standards body, or, in the case of
+interfaces specified for a particular programming language, one that
+is widely used among developers working in that language.
+
+ The "System Libraries" of an executable work include anything, other
+than the work as a whole, that (a) is included in the normal form of
+packaging a Major Component, but which is not part of that Major
+Component, and (b) serves only to enable use of the work with that
+Major Component, or to implement a Standard Interface for which an
+implementation is available to the public in source code form. A
+"Major Component", in this context, means a major essential component
+(kernel, window system, and so on) of the specific operating system
+(if any) on which the executable work runs, or a compiler used to
+produce the work, or an object code interpreter used to run it.
+
+ The "Corresponding Source" for a work in object code form means all
+the source code needed to generate, install, and (for an executable
+work) run the object code and to modify the work, including scripts to
+control those activities. However, it does not include the work's
+System Libraries, or general-purpose tools or generally available free
+programs which are used unmodified in performing those activities but
+which are not part of the work. For example, Corresponding Source
+includes interface definition files associated with source files for
+the work, and the source code for shared libraries and dynamically
+linked subprograms that the work is specifically designed to require,
+such as by intimate data communication or control flow between those
+subprograms and other parts of the work.
+
+ The Corresponding Source need not include anything that users
+can regenerate automatically from other parts of the Corresponding
+Source.
+
+ The Corresponding Source for a work in source code form is that
+same work.
+
+ 2. Basic Permissions.
+
+ All rights granted under this License are granted for the term of
+copyright on the Program, and are irrevocable provided the stated
+conditions are met. This License explicitly affirms your unlimited
+permission to run the unmodified Program. The output from running a
+covered work is covered by this License only if the output, given its
+content, constitutes a covered work. This License acknowledges your
+rights of fair use or other equivalent, as provided by copyright law.
+
+ You may make, run and propagate covered works that you do not
+convey, without conditions so long as your license otherwise remains
+in force. You may convey covered works to others for the sole purpose
+of having them make modifications exclusively for you, or provide you
+with facilities for running those works, provided that you comply with
+the terms of this License in conveying all material for which you do
+not control copyright. Those thus making or running the covered works
+for you must do so exclusively on your behalf, under your direction
+and control, on terms that prohibit them from making any copies of
+your copyrighted material outside their relationship with you.
+
+ Conveying under any other circumstances is permitted solely under
+the conditions stated below. Sublicensing is not allowed; section 10
+makes it unnecessary.
+
+ 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+ No covered work shall be deemed part of an effective technological
+measure under any applicable law fulfilling obligations under article
+11 of the WIPO copyright treaty adopted on 20 December 1996, or
+similar laws prohibiting or restricting circumvention of such
+measures.
+
+ When you convey a covered work, you waive any legal power to forbid
+circumvention of technological measures to the extent such circumvention
+is effected by exercising rights under this License with respect to
+the covered work, and you disclaim any intention to limit operation or
+modification of the work as a means of enforcing, against the work's
+users, your or third parties' legal rights to forbid circumvention of
+technological measures.
+
+ 4. Conveying Verbatim Copies.
+
+ You may convey verbatim copies of the Program's source code as you
+receive it, in any medium, provided that you conspicuously and
+appropriately publish on each copy an appropriate copyright notice;
+keep intact all notices stating that this License and any
+non-permissive terms added in accord with section 7 apply to the code;
+keep intact all notices of the absence of any warranty; and give all
+recipients a copy of this License along with the Program.
+
+ You may charge any price or no price for each copy that you convey,
+and you may offer support or warranty protection for a fee.
+
+ 5. Conveying Modified Source Versions.
+
+ You may convey a work based on the Program, or the modifications to
+produce it from the Program, in the form of source code under the
+terms of section 4, provided that you also meet all of these conditions:
+
+ a) The work must carry prominent notices stating that you modified
+ it, and giving a relevant date.
+
+ b) The work must carry prominent notices stating that it is
+ released under this License and any conditions added under section
+ 7. This requirement modifies the requirement in section 4 to
+ "keep intact all notices".
+
+ c) You must license the entire work, as a whole, under this
+ License to anyone who comes into possession of a copy. This
+ License will therefore apply, along with any applicable section 7
+ additional terms, to the whole of the work, and all its parts,
+ regardless of how they are packaged. This License gives no
+ permission to license the work in any other way, but it does not
+ invalidate such permission if you have separately received it.
+
+ d) If the work has interactive user interfaces, each must display
+ Appropriate Legal Notices; however, if the Program has interactive
+ interfaces that do not display Appropriate Legal Notices, your
+ work need not make them do so.
+
+ A compilation of a covered work with other separate and independent
+works, which are not by their nature extensions of the covered work,
+and which are not combined with it such as to form a larger program,
+in or on a volume of a storage or distribution medium, is called an
+"aggregate" if the compilation and its resulting copyright are not
+used to limit the access or legal rights of the compilation's users
+beyond what the individual works permit. Inclusion of a covered work
+in an aggregate does not cause this License to apply to the other
+parts of the aggregate.
+
+ 6. Conveying Non-Source Forms.
+
+ You may convey a covered work in object code form under the terms
+of sections 4 and 5, provided that you also convey the
+machine-readable Corresponding Source under the terms of this License,
+in one of these ways:
+
+ a) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by the
+ Corresponding Source fixed on a durable physical medium
+ customarily used for software interchange.
+
+ b) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by a
+ written offer, valid for at least three years and valid for as
+ long as you offer spare parts or customer support for that product
+ model, to give anyone who possesses the object code either (1) a
+ copy of the Corresponding Source for all the software in the
+ product that is covered by this License, on a durable physical
+ medium customarily used for software interchange, for a price no
+ more than your reasonable cost of physically performing this
+ conveying of source, or (2) access to copy the
+ Corresponding Source from a network server at no charge.
+
+ c) Convey individual copies of the object code with a copy of the
+ written offer to provide the Corresponding Source. This
+ alternative is allowed only occasionally and noncommercially, and
+ only if you received the object code with such an offer, in accord
+ with subsection 6b.
+
+ d) Convey the object code by offering access from a designated
+ place (gratis or for a charge), and offer equivalent access to the
+ Corresponding Source in the same way through the same place at no
+ further charge. You need not require recipients to copy the
+ Corresponding Source along with the object code. If the place to
+ copy the object code is a network server, the Corresponding Source
+ may be on a different server (operated by you or a third party)
+ that supports equivalent copying facilities, provided you maintain
+ clear directions next to the object code saying where to find the
+ Corresponding Source. Regardless of what server hosts the
+ Corresponding Source, you remain obligated to ensure that it is
+ available for as long as needed to satisfy these requirements.
+
+ e) Convey the object code using peer-to-peer transmission, provided
+ you inform other peers where the object code and Corresponding
+ Source of the work are being offered to the general public at no
+ charge under subsection 6d.
+
+ A separable portion of the object code, whose source code is excluded
+from the Corresponding Source as a System Library, need not be
+included in conveying the object code work.
+
+ A "User Product" is either (1) a "consumer product", which means any
+tangible personal property which is normally used for personal, family,
+or household purposes, or (2) anything designed or sold for incorporation
+into a dwelling. In determining whether a product is a consumer product,
+doubtful cases shall be resolved in favor of coverage. For a particular
+product received by a particular user, "normally used" refers to a
+typical or common use of that class of product, regardless of the status
+of the particular user or of the way in which the particular user
+actually uses, or expects or is expected to use, the product. A product
+is a consumer product regardless of whether the product has substantial
+commercial, industrial or non-consumer uses, unless such uses represent
+the only significant mode of use of the product.
+
+ "Installation Information" for a User Product means any methods,
+procedures, authorization keys, or other information required to install
+and execute modified versions of a covered work in that User Product from
+a modified version of its Corresponding Source. The information must
+suffice to ensure that the continued functioning of the modified object
+code is in no case prevented or interfered with solely because
+modification has been made.
+
+ If you convey an object code work under this section in, or with, or
+specifically for use in, a User Product, and the conveying occurs as
+part of a transaction in which the right of possession and use of the
+User Product is transferred to the recipient in perpetuity or for a
+fixed term (regardless of how the transaction is characterized), the
+Corresponding Source conveyed under this section must be accompanied
+by the Installation Information. But this requirement does not apply
+if neither you nor any third party retains the ability to install
+modified object code on the User Product (for example, the work has
+been installed in ROM).
+
+ The requirement to provide Installation Information does not include a
+requirement to continue to provide support service, warranty, or updates
+for a work that has been modified or installed by the recipient, or for
+the User Product in which it has been modified or installed. Access to a
+network may be denied when the modification itself materially and
+adversely affects the operation of the network or violates the rules and
+protocols for communication across the network.
+
+ Corresponding Source conveyed, and Installation Information provided,
+in accord with this section must be in a format that is publicly
+documented (and with an implementation available to the public in
+source code form), and must require no special password or key for
+unpacking, reading or copying.
+
+ 7. Additional Terms.
+
+ "Additional permissions" are terms that supplement the terms of this
+License by making exceptions from one or more of its conditions.
+Additional permissions that are applicable to the entire Program shall
+be treated as though they were included in this License, to the extent
+that they are valid under applicable law. If additional permissions
+apply only to part of the Program, that part may be used separately
+under those permissions, but the entire Program remains governed by
+this License without regard to the additional permissions.
+
+ When you convey a copy of a covered work, you may at your option
+remove any additional permissions from that copy, or from any part of
+it. (Additional permissions may be written to require their own
+removal in certain cases when you modify the work.) You may place
+additional permissions on material, added by you to a covered work,
+for which you have or can give appropriate copyright permission.
+
+ Notwithstanding any other provision of this License, for material you
+add to a covered work, you may (if authorized by the copyright holders of
+that material) supplement the terms of this License with terms:
+
+ a) Disclaiming warranty or limiting liability differently from the
+ terms of sections 15 and 16 of this License; or
+
+ b) Requiring preservation of specified reasonable legal notices or
+ author attributions in that material or in the Appropriate Legal
+ Notices displayed by works containing it; or
+
+ c) Prohibiting misrepresentation of the origin of that material, or
+ requiring that modified versions of such material be marked in
+ reasonable ways as different from the original version; or
+
+ d) Limiting the use for publicity purposes of names of licensors or
+ authors of the material; or
+
+ e) Declining to grant rights under trademark law for use of some
+ trade names, trademarks, or service marks; or
+
+ f) Requiring indemnification of licensors and authors of that
+ material by anyone who conveys the material (or modified versions of
+ it) with contractual assumptions of liability to the recipient, for
+ any liability that these contractual assumptions directly impose on
+ those licensors and authors.
+
+ All other non-permissive additional terms are considered "further
+restrictions" within the meaning of section 10. If the Program as you
+received it, or any part of it, contains a notice stating that it is
+governed by this License along with a term that is a further
+restriction, you may remove that term. If a license document contains
+a further restriction but permits relicensing or conveying under this
+License, you may add to a covered work material governed by the terms
+of that license document, provided that the further restriction does
+not survive such relicensing or conveying.
+
+ If you add terms to a covered work in accord with this section, you
+must place, in the relevant source files, a statement of the
+additional terms that apply to those files, or a notice indicating
+where to find the applicable terms.
+
+ Additional terms, permissive or non-permissive, may be stated in the
+form of a separately written license, or stated as exceptions;
+the above requirements apply either way.
+
+ 8. Termination.
+
+ You may not propagate or modify a covered work except as expressly
+provided under this License. Any attempt otherwise to propagate or
+modify it is void, and will automatically terminate your rights under
+this License (including any patent licenses granted under the third
+paragraph of section 11).
+
+ However, if you cease all violation of this License, then your
+license from a particular copyright holder is reinstated (a)
+provisionally, unless and until the copyright holder explicitly and
+finally terminates your license, and (b) permanently, if the copyright
+holder fails to notify you of the violation by some reasonable means
+prior to 60 days after the cessation.
+
+ Moreover, your license from a particular copyright holder is
+reinstated permanently if the copyright holder notifies you of the
+violation by some reasonable means, this is the first time you have
+received notice of violation of this License (for any work) from that
+copyright holder, and you cure the violation prior to 30 days after
+your receipt of the notice.
+
+ Termination of your rights under this section does not terminate the
+licenses of parties who have received copies or rights from you under
+this License. If your rights have been terminated and not permanently
+reinstated, you do not qualify to receive new licenses for the same
+material under section 10.
+
+ 9. Acceptance Not Required for Having Copies.
+
+ You are not required to accept this License in order to receive or
+run a copy of the Program. Ancillary propagation of a covered work
+occurring solely as a consequence of using peer-to-peer transmission
+to receive a copy likewise does not require acceptance. However,
+nothing other than this License grants you permission to propagate or
+modify any covered work. These actions infringe copyright if you do
+not accept this License. Therefore, by modifying or propagating a
+covered work, you indicate your acceptance of this License to do so.
+
+ 10. Automatic Licensing of Downstream Recipients.
+
+ Each time you convey a covered work, the recipient automatically
+receives a license from the original licensors, to run, modify and
+propagate that work, subject to this License. You are not responsible
+for enforcing compliance by third parties with this License.
+
+ An "entity transaction" is a transaction transferring control of an
+organization, or substantially all assets of one, or subdividing an
+organization, or merging organizations. If propagation of a covered
+work results from an entity transaction, each party to that
+transaction who receives a copy of the work also receives whatever
+licenses to the work the party's predecessor in interest had or could
+give under the previous paragraph, plus a right to possession of the
+Corresponding Source of the work from the predecessor in interest, if
+the predecessor has it or can get it with reasonable efforts.
+
+ You may not impose any further restrictions on the exercise of the
+rights granted or affirmed under this License. For example, you may
+not impose a license fee, royalty, or other charge for exercise of
+rights granted under this License, and you may not initiate litigation
+(including a cross-claim or counterclaim in a lawsuit) alleging that
+any patent claim is infringed by making, using, selling, offering for
+sale, or importing the Program or any portion of it.
+
+ 11. Patents.
+
+ A "contributor" is a copyright holder who authorizes use under this
+License of the Program or a work on which the Program is based. The
+work thus licensed is called the contributor's "contributor version".
+
+ A contributor's "essential patent claims" are all patent claims
+owned or controlled by the contributor, whether already acquired or
+hereafter acquired, that would be infringed by some manner, permitted
+by this License, of making, using, or selling its contributor version,
+but do not include claims that would be infringed only as a
+consequence of further modification of the contributor version. For
+purposes of this definition, "control" includes the right to grant
+patent sublicenses in a manner consistent with the requirements of
+this License.
+
+ Each contributor grants you a non-exclusive, worldwide, royalty-free
+patent license under the contributor's essential patent claims, to
+make, use, sell, offer for sale, import and otherwise run, modify and
+propagate the contents of its contributor version.
+
+ In the following three paragraphs, a "patent license" is any express
+agreement or commitment, however denominated, not to enforce a patent
+(such as an express permission to practice a patent or covenant not to
+sue for patent infringement). To "grant" such a patent license to a
+party means to make such an agreement or commitment not to enforce a
+patent against the party.
+
+ If you convey a covered work, knowingly relying on a patent license,
+and the Corresponding Source of the work is not available for anyone
+to copy, free of charge and under the terms of this License, through a
+publicly available network server or other readily accessible means,
+then you must either (1) cause the Corresponding Source to be so
+available, or (2) arrange to deprive yourself of the benefit of the
+patent license for this particular work, or (3) arrange, in a manner
+consistent with the requirements of this License, to extend the patent
+license to downstream recipients. "Knowingly relying" means you have
+actual knowledge that, but for the patent license, your conveying the
+covered work in a country, or your recipient's use of the covered work
+in a country, would infringe one or more identifiable patents in that
+country that you have reason to believe are valid.
+
+ If, pursuant to or in connection with a single transaction or
+arrangement, you convey, or propagate by procuring conveyance of, a
+covered work, and grant a patent license to some of the parties
+receiving the covered work authorizing them to use, propagate, modify
+or convey a specific copy of the covered work, then the patent license
+you grant is automatically extended to all recipients of the covered
+work and works based on it.
+
+ A patent license is "discriminatory" if it does not include within
+the scope of its coverage, prohibits the exercise of, or is
+conditioned on the non-exercise of one or more of the rights that are
+specifically granted under this License. You may not convey a covered
+work if you are a party to an arrangement with a third party that is
+in the business of distributing software, under which you make payment
+to the third party based on the extent of your activity of conveying
+the work, and under which the third party grants, to any of the
+parties who would receive the covered work from you, a discriminatory
+patent license (a) in connection with copies of the covered work
+conveyed by you (or copies made from those copies), or (b) primarily
+for and in connection with specific products or compilations that
+contain the covered work, unless you entered into that arrangement,
+or that patent license was granted, prior to 28 March 2007.
+
+ Nothing in this License shall be construed as excluding or limiting
+any implied license or other defenses to infringement that may
+otherwise be available to you under applicable patent law.
+
+ 12. No Surrender of Others' Freedom.
+
+ If conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot convey a
+covered work so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you may
+not convey it at all. For example, if you agree to terms that obligate you
+to collect a royalty for further conveying from those to whom you convey
+the Program, the only way you could satisfy both those terms and this
+License would be to refrain entirely from conveying the Program.
+
+ 13. Remote Network Interaction; Use with the GNU General Public License.
+
+ Notwithstanding any other provision of this License, if you modify the
+Program, your modified version must prominently offer all users
+interacting with it remotely through a computer network (if your version
+supports such interaction) an opportunity to receive the Corresponding
+Source of your version by providing access to the Corresponding Source
+from a network server at no charge, through some standard or customary
+means of facilitating copying of software. This Corresponding Source
+shall include the Corresponding Source for any work covered by version 3
+of the GNU General Public License that is incorporated pursuant to the
+following paragraph.
+
+ Notwithstanding any other provision of this License, you have
+permission to link or combine any covered work with a work licensed
+under version 3 of the GNU General Public License into a single
+combined work, and to convey the resulting work. The terms of this
+License will continue to apply to the part which is the covered work,
+but the work with which it is combined will remain governed by version
+3 of the GNU General Public License.
+
+ 14. Revised Versions of this License.
+
+ The Free Software Foundation may publish revised and/or new versions of
+the GNU Affero General Public License from time to time. Such new versions
+will be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the
+Program specifies that a certain numbered version of the GNU Affero General
+Public License "or any later version" applies to it, you have the
+option of following the terms and conditions either of that numbered
+version or of any later version published by the Free Software
+Foundation. If the Program does not specify a version number of the
+GNU Affero General Public License, you may choose any version ever published
+by the Free Software Foundation.
+
+ If the Program specifies that a proxy can decide which future
+versions of the GNU Affero General Public License can be used, that proxy's
+public statement of acceptance of a version permanently authorizes you
+to choose that version for the Program.
+
+ Later license versions may give you additional or different
+permissions. However, no additional obligations are imposed on any
+author or copyright holder as a result of your choosing to follow a
+later version.
+
+ 15. Disclaimer of Warranty.
+
+ THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. Limitation of Liability.
+
+ IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+ 17. Interpretation of Sections 15 and 16.
+
+ If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+    <one line to give the program's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+    along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+ If your software can interact with users remotely through a computer
+network, you should also make sure that it provides a way for users to
+get its source. For example, if your program is a web application, its
+interface could display a "Source" link that leads users to an archive
+of the code. There are many ways you could offer source, and different
+solutions will be better for different programs; see section 13 for the
+specific requirements.
+
+ You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU AGPL, see
+<https://www.gnu.org/licenses/>.
diff --git a/LICENSING.md b/LICENSING.md
new file mode 100644
index 0000000000..4e53ac0d85
--- /dev/null
+++ b/LICENSING.md
@@ -0,0 +1,17 @@
+# Licensing
+
+License names used in this document are as per [SPDX License List](https://spdx.org/licenses/).
+
+The default license for this project is [AGPL-3.0-only](LICENSE).
+
+## Apache-2.0
+
+The following directories and their subdirectories are licensed under Apache-2.0:
+
+```
+```
+
+The following directories and their subdirectories are licensed under their original upstream licenses:
+
+```
+```
diff --git a/README.md b/README.md
new file mode 100644
index 0000000000..aa8300dcac
--- /dev/null
+++ b/README.md
@@ -0,0 +1,76 @@
+# Grafana OnCall Incident Response
+This is the open source version of Grafana OnCall. A cloud version is available as part of Grafana Cloud: https://grafana.com/products/cloud/
+
+Developer-friendly incident response management with brilliant Slack integration.
+- Connect monitoring systems
+- Collect and analyze data
+- On-call rotation
+- Automatic escalation
+- Never miss alerts with calls and SMS
+
+![Grafana OnCall Screenshot](screenshot.png)
+
+## Getting Started
+OnCall consists of two parts:
+1. OnCall backend
+2. "Grafana OnCall" plugin you need to install in your Grafana
+
+### How to run OnCall backend
+1. An all-in-one image of OnCall is available on Docker Hub. Run it:
+```bash
+docker run -it --name oncall-backend -p 8000:8000 grafana/oncall-all-in-one
+```
+
+2. When the image starts up you will see a message like this:
+```bash
+👋 This script will issue an invite token to securely connect the frontend.
+Maintainers will be happy to help in the slack channel #grafana-oncall: https://slack.grafana.com/
+Your invite token: <your-invite-token>, use it in the Grafana OnCall plugin.
+```
+
+3. If you started your container detached with `-d`, check the logs:
+```bash
+docker logs oncall-backend
+```
+
+### How to install "Grafana OnCall" Plugin and connect with a backend
+1. Open Grafana in your browser and login as an Admin
+2. Navigate to Configuration → Plugins
+3. Type Grafana OnCall into the "Search Grafana plugins" field
+4. Select the Grafana OnCall plugin and press the "Install" button
+5. On the Grafana OnCall plugin page, enable the plugin, then go to the Configuration tab. You should see a status field with the message:
+```
+OnCall has not been setup, configure & initialize below.
+```
+6. Fill in the configuration fields using the token you got from the backend earlier, then press "Install Configuration":
+```
+OnCall API URL: (The URL & port used to access OnCall)
+http://host.docker.internal:8000
+
+OnCall Invitation Token (Single use token to connect Grafana instance):
+Invitation token from docker startup
+
+Grafana URL (URL OnCall will use to talk to this Grafana instance):
+http://localhost:3000 (or http://host.docker.internal:3000 if your Grafana is running in Docker locally)
+```
+
+## Getting Help
+- `#grafana-oncall` channel at https://slack.grafana.com/
+- Grafana Labs community forum for OnCall: https://community.grafana.com
+- File an [issue](https://github.com/grafana/oncall/issues) for bugs and feature suggestions.
+
+## Production Setup
+
+Looking for the production instructions? We're going to release them soon. Please join our Slack channel to be the first to know about them.
+
+## Further Reading
+- *Documentation* - [Grafana OnCall](https://grafana.com/docs/grafana-cloud/oncall/)
+- *Blog Post* - [Announcing Grafana OnCall, the easiest way to do on-call management](https://grafana.com/blog/2021/11/09/announcing-grafana-oncall/)
+- *Presentation* - [Deep dive into the Grafana, Prometheus, and Alertmanager stack for alerting and on-call management](https://grafana.com/go/observabilitycon/2021/alerting/?pg=blog)
+
+## FAQ
+
+- How do I generate a new invitation token to connect the plugin with a backend?
+```bash
+docker exec oncall-backend python manage.py issue_invite_for_the_frontend --override
+```
diff --git a/SECURITY.md b/SECURITY.md
new file mode 100644
index 0000000000..b8697c164d
--- /dev/null
+++ b/SECURITY.md
@@ -0,0 +1,20 @@
+# Reporting security issues
+
+If you think you have found a security vulnerability, please send a report to [security@grafana.com](mailto:security@grafana.com). This address can be used for all of Grafana Labs's open source and commercial products (including but not limited to Grafana, Grafana Cloud, Grafana Enterprise, and grafana.com). We can accept only vulnerability reports at this address.
+
+Please encrypt your message to us using our PGP key. The key fingerprint is:
+
+F988 7BEA 027A 049F AE8E 5CAA D125 8932 BE24 C5CA
+
+The key is available from [keyserver.ubuntu.com](https://keyserver.ubuntu.com/pks/lookup?search=0xF9887BEA027A049FAE8E5CAAD1258932BE24C5CA&fingerprint=on&op=index).
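+
+For reference, one way to fetch the key with GnuPG (a sketch):
+
+```bash
+gpg --keyserver keyserver.ubuntu.com \
+    --recv-keys F9887BEA027A049FAE8E5CAAD1258932BE24C5CA
+```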
+
+Grafana Labs will send you a response indicating the next steps in handling your report. After the initial reply to your report, the security team will keep you informed of the progress towards a fix and full announcement, and may ask for additional information or guidance.
+
+**Important:** We ask you not to disclose the vulnerability before it has been fixed and announced, unless you have received a response from the Grafana Labs security team saying that you can do so.
+
+## Security announcements
+
+We maintain a category on the community site called [Security Announcements](https://community.grafana.com/c/support/security-announcements),
+where we will post a summary, remediation, and mitigation details for any patch containing security fixes.
+
+You can subscribe to email updates for this category if you have a grafana.com account and sign on to the community site, or track updates via an [RSS feed](https://community.grafana.com/c/support/security-announcements.rss).
diff --git a/developer-docker-compose.yml b/developer-docker-compose.yml
new file mode 100644
index 0000000000..b24312d6da
--- /dev/null
+++ b/developer-docker-compose.yml
@@ -0,0 +1,69 @@
+version: '3.2'
+
+services:
+
+ mysql:
+ image: mariadb:10.2
+ platform: linux/x86_64
+ mem_limit: 500m
+ cpus: 0.5
+ command: --default-authentication-plugin=mysql_native_password
+ restart: always
+ ports:
+ - 3306:3306
+ environment:
+ MYSQL_ROOT_PASSWORD: local_dev_pwd
+ MYSQL_DATABASE: oncall_local_dev
+ healthcheck:
+ test: [ "CMD", "mysqladmin" ,"ping", "-h", "localhost" ]
+ timeout: 20s
+ retries: 10
+
+ redis:
+ image: redis
+ mem_limit: 100m
+ cpus: 0.1
+ restart: always
+ ports:
+ - 6379:6379
+
+ rabbit:
+ image: "rabbitmq:3.7.15-management"
+ mem_limit: 1000m
+ cpus: 0.5
+ environment:
+ RABBITMQ_DEFAULT_USER: "rabbitmq"
+ RABBITMQ_DEFAULT_PASS: "rabbitmq"
+ RABBITMQ_DEFAULT_VHOST: "/"
+ ports:
+ - 15672:15672
+ - 5672:5672
+
+ mysql-to-create-grafana-db:
+ image: mariadb:10.2
+ platform: linux/x86_64
+ command: bash -c "mysql -h mysql -uroot -plocal_dev_pwd -e 'CREATE DATABASE IF NOT EXISTS grafana CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci;'"
+ depends_on:
+ mysql:
+ condition: service_healthy
+
+ grafana:
+ image: "grafana/grafana:8.3.2"
+ restart: always
+ mem_limit: 500m
+ cpus: 0.5
+ environment:
+ GF_DATABASE_TYPE: mysql
+ GF_DATABASE_HOST: mysql
+ GF_DATABASE_USER: root
+ GF_DATABASE_PASSWORD: local_dev_pwd
+ GF_SECURITY_ADMIN_USER: oncall
+ GF_SECURITY_ADMIN_PASSWORD: oncall
+ GF_PLUGINS_ALLOW_LOADING_UNSIGNED_PLUGINS: grafana-oncall-app
+ volumes:
+ - ./grafana-plugin:/var/lib/grafana/plugins/grafana-plugin
+ ports:
+ - 3000:3000
+ depends_on:
+ mysql:
+ condition: service_healthy
diff --git a/docs/Makefile b/docs/Makefile
new file mode 100644
index 0000000000..f66259da32
--- /dev/null
+++ b/docs/Makefile
@@ -0,0 +1,11 @@
+IMAGE = grafana/docs-base:latest
+CONTENT_PATH = /hugo/content/docs/amixr/latest
+PORT = 3002:3002
+
+.PHONY: pull
+pull:
+ docker pull $(IMAGE)
+
+.PHONY: docs
+docs: pull
+ docker run -v $(shell pwd)/sources:$(CONTENT_PATH):Z -p $(PORT) --rm -it $(IMAGE)
diff --git a/docs/README.md b/docs/README.md
new file mode 100644
index 0000000000..8d702ceb8c
--- /dev/null
+++ b/docs/README.md
@@ -0,0 +1,8 @@
+# Grafana Cloud Documentation
+
+Source for documentation at https://grafana.com/docs/amixr/
+
+## Preview the website
+
+Make sure Docker is running, then run `make docs`. This launches a preview of the website at `http://localhost:3002/docs/amixr/`, which refreshes automatically when content in the `sources` directory changes.
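+
+For example, from the repository root:
+
+```bash
+cd docs
+make docs   # pulls grafana/docs-base and serves the preview on port 3002
+```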
diff --git a/docs/sources/_index.md b/docs/sources/_index.md
new file mode 100644
index 0000000000..ec446b6f6d
--- /dev/null
+++ b/docs/sources/_index.md
@@ -0,0 +1,16 @@
++++
+title = "Grafana OnCall"
+keywords = ["Grafana Cloud", "Alerts", "Notifications", "on-call", "amixr", "OnCall", "irm"]
+weight = 1000
+aliases = ["/docs/grafana-cloud/oncall/"]
++++
+
+# Grafana OnCall
+
+You can use Grafana OnCall to connect to the monitoring systems of your Grafana Cloud integrations so that you can ensure alert notifications reach the right people, at the right time, using the right medium. Team members can review and manage alert group notifications directly in OnCall, or in supported platforms, like Telegram and Slack.
+
+When you integrate an alert monitoring system with Grafana OnCall, the alerts will create an alert group. This group will fire notifications that are sent according to escalation policies that you define in routes and escalation chains. You can configure escalation actions, and automatically notify users and groups through services like Slack, by text message, and by phone call. You can also use popular calendar services like Google Calendar and Outlook to schedule on-call shifts for team members.
+
+Follow these links to learn more:
+
+{{< section >}}
\ No newline at end of file
diff --git a/docs/sources/calendar-schedules/_index.md b/docs/sources/calendar-schedules/_index.md
new file mode 100644
index 0000000000..c0c68cea85
--- /dev/null
+++ b/docs/sources/calendar-schedules/_index.md
@@ -0,0 +1,15 @@
++++
+title = "On-call calendar scheduling"
+description = ""
+keywords = ["Grafana", "oncall", "on-call", "calendar"]
+aliases = []
+weight = 1100
++++
+
+# Use calendars to create on-call schedules
+
+Grafana OnCall allows you to use any calendar service that uses the iCal format to create customized on-call schedules for team members. Using Grafana OnCall, you can create a primary calendar that acts as a read-only schedule, and an override calendar that allows all team members to modify schedules as they change.
+
+To learn more about creating on-call calendars, see the following topics:
+
+{{< section >}}
\ No newline at end of file
diff --git a/docs/sources/calendar-schedules/about-calendars.md b/docs/sources/calendar-schedules/about-calendars.md
new file mode 100644
index 0000000000..c01e2df9ec
--- /dev/null
+++ b/docs/sources/calendar-schedules/about-calendars.md
@@ -0,0 +1,13 @@
++++
+title = "About Grafana OnCall calendar scheduling"
+description = ""
+keywords = ["Grafana", "oncall", "on-call", "calendar"]
+aliases = []
+weight = 100
++++
+
+# About Grafana OnCall scheduling
+
+You can use any calendar with an iCal address to schedule on-call times for users. During these times, notifications configured in escalation chains with the **Notify users from an on-call schedule** setting will be sent to the person scheduled. You can also schedule multiple users for overlapping times, and assign prioritization labels for the user that you would like to notify.
+
+When you create a schedule, you will be able to select a Slack channel, associated with your OnCall account, that will notify users when there are errors or notifications regarding the assigned on-call shifts.
\ No newline at end of file
diff --git a/docs/sources/calendar-schedules/create-calendar.md b/docs/sources/calendar-schedules/create-calendar.md
new file mode 100644
index 0000000000..551d4632d1
--- /dev/null
+++ b/docs/sources/calendar-schedules/create-calendar.md
@@ -0,0 +1,44 @@
++++
+title = "Create an on-call schedule calendar"
+description = ""
+keywords = ["Grafana", "oncall", "on-call", "calendar"]
+aliases = []
+weight = 300
++++
+
+# Create an on-call schedule calendar
+
+Create a primary calendar and an optional override calendar to schedule on-call shifts for team members.
+
+1. In the **Scheduling** section of Grafana OnCall, click **+ Create schedule**.
+
+1. Give the schedule a name.
+
+1. Create a new calendar in your calendar service and locate the secret iCal URL. For example, in a Google calendar, this URL can be found in **Settings > Settings for my calendars > Integrate calendar**.
+
+1. Copy the secret iCal URL. In OnCall, paste it into the **Primary schedule for iCal URL** field.
+ The permissions you set when you create the calendar determine who can modify the calendar.
+
+1. Click **Create Schedule**.
+
+1. Schedule on-call times for team members.
+
+ Use the Grafana username of team members as the event name to schedule their on-call times. You can take advantage of all of the features of your calendar service.
+
+1. Create overlapping schedules (optional).
+
+   When you create schedules that overlap, you can prioritize a schedule by adding a level marker. For example, if users AliceGrafana and BobGrafana have overlapping schedules, but BobGrafana is the primary contact, you would name his event `[L1] BobGrafana`. AliceGrafana keeps the default `[L0]` status and would not receive notifications during the overlapping time. You can use priority levels up to and including `[L9]`. The sketch below shows how such an event can look in the calendar feed.
+
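+For illustration, an on-call shift event in the underlying iCal feed might look like the following sketch (your calendar service generates these fields; the values here are examples):
+
+```
+BEGIN:VEVENT
+UID:oncall-shift-example-1
+DTSTART:20220601T090000Z
+DTEND:20220601T170000Z
+SUMMARY:[L1] BobGrafana
+END:VEVENT
+```
+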
+## Create an override calendar (optional)
+
+You can use an override calendar to allow team members to schedule on-call duties that will override the primary schedule. An override option allows flexibility without modifying the primary schedule. Events scheduled on the override calendar will always override overlapping events on the primary calendar.
+
+1. Create a new calendar using the same calendar service you used to create the primary calendar.
+
+ Be sure to set permissions that allow team members to edit the calendar.
+
+1. In the scheduling section of Grafana OnCall, select the primary calendar you want to override.
+
+1. Click **Edit**.
+
+1. Enter the secret iCal URL in the **Overrides schedule iCal URL** field and click **Update**.
\ No newline at end of file
diff --git a/docs/sources/chat-options/_index.md b/docs/sources/chat-options/_index.md
new file mode 100644
index 0000000000..fcd147c763
--- /dev/null
+++ b/docs/sources/chat-options/_index.md
@@ -0,0 +1,13 @@
++++
+title = "Messaging application configuration"
+keywords = ["Grafana Cloud", "Alerts", "Notifications", "on-call", "amixr", "oncall", "slack"]
+weight = 700
++++
+
+# Messaging application configuration
+
+Grafana OnCall directly supports the export of alert notifications to some popular messaging applications like Slack and Telegram. You can use outgoing webhooks to applications that aren't directly supported. For information on configuring outgoing webhooks, see [Send alert group notifications by webhook]({{< relref "../integrations/webhooks/configure-outgoing-webhooks.md" >}}).
+
+To configure supported messaging apps, see the following topics:
+
+{{< section >}}
\ No newline at end of file
diff --git a/docs/sources/chat-options/configure-slack.md b/docs/sources/chat-options/configure-slack.md
new file mode 100644
index 0000000000..447ca25fc6
--- /dev/null
+++ b/docs/sources/chat-options/configure-slack.md
@@ -0,0 +1,33 @@
++++
+title = "Configure Slack for Grafana OnCall"
+keywords = ["Grafana Cloud", "Alerts", "Notifications", "on-call", "amixr", "oncall", "slack"]
+weight = 100
++++
+
+# Configure Slack for Grafana OnCall
+Grafana OnCall integrates closely with your Slack workspace to deliver alert group notifications to individuals, groups, and team members.
+
+## Connect to Slack
+
+Connect your organization's Slack workspace to your Grafana OnCall instance.
+
+>**NOTE:** Only Grafana users with the administrator role can configure OnCall settings.
+
+1. In OnCall, click on the **ChatOps** tab and select Slack in the side menu.
+1. Click **Install Slack integration**.
+1. Read the notice and click the button to proceed to the Slack website.
+1. Sign in to your organization's workspace.
+1. Click **Allow** to allow OnCall to access Slack.
+1. Ensure users verify their Slack accounts in their user profile in OnCall.
+
+## Configure Slack in OnCall
+
+In the Slack settings for Grafana OnCall, administrators can set a default Slack channel for notifications and opt to set reminders for acknowledged alerts that can time out and revert an alert group to the unacknowledged state.
+
+1. In OnCall, click on the **ChatOps** tab and select Slack in the side menu.
+1. In the first dropdown menu, select a default Slack channel.
+ When you set up escalation policies to notify Slack channels of incoming alerts, the default will be the one you set here. You will still have the option to select from all the channels available in your organization.
+1. In **Additional settings** you can choose how to remind users of acknowledged but unresolved alert groups. You can also select whether, and when, to automatically revert an acknowledged alert group to the unacknowledged state.
+
+## Slack settings for on-call calendar scheduling notifications
+Admins can configure settings in Slack to notify people and groups about on-call schedules. When an on-call shift notification is sent to a person or channel, click the gear button to access **Notification preferences**. Use the options to configure the behavior of future shift notifications.
\ No newline at end of file
diff --git a/docs/sources/chat-options/configure-telegram.md b/docs/sources/chat-options/configure-telegram.md
new file mode 100644
index 0000000000..fc30f0706b
--- /dev/null
+++ b/docs/sources/chat-options/configure-telegram.md
@@ -0,0 +1,39 @@
++++
+title = "Configure Telegram for Grafana OnCall"
+keywords = ["Grafana Cloud", "Alerts", "Notifications", "on-call", "amixr", "oncall", "telegram"]
+weight = 300
++++
+
+# Configure Telegram for Grafana OnCall
+
+You can use Telegram to deliver alert group notifications to a dedicated channel, and allow users to perform notification actions.
+
+Each alert group notification is assigned a dedicated discussion. Users can perform notification actions (acknowledge, resolve, silence), create reports, and discuss alerts in the comments section of the discussions.
+
+## Connect to Telegram
+
+Connect your organization's Telegram account to your Grafana OnCall instance by following the instructions provided in OnCall. You can use the following steps as a reference.
+
+>**NOTE:** Only Grafana users with the administrator role can configure OnCall settings.
+
+1. In OnCall, click on the **ChatOps** tab and select Telegram in the side menu.
+1. Click **Connect Telegram channel** and follow the instructions, mirrored here for reference. A unique verification code will be generated that you must use to activate the channel.
+1. In your team Telegram account, create a new channel, and set it to **Private**.
+1. In **Manage Channel**, make sure **Sign messages** is enabled.
+1. Create a new discussion group.
+ This group handles alert actions and comments.
+1. Add the discussion group to the channel.
+ In **Manage Channel**, click **Discussion** to find and add the new group.
+1. In OnCall, click the link to the OnCall bot to add it to your contacts.
+1. In Telegram, add the bot to your channel as an Admin. Allow it to **Post Messages**.
+1. Add the bot to the discussion group.
+1. In OnCall, send the provided verification code to the channel.
+1. Make sure users connect to Telegram in their OnCall user profile.
+
+## Configure Telegram user settings in OnCall
+
+1. In your profile, find the Telegram setting and click **Connect**.
+1. Click **Connect automatically** for the bot to message you and to bring up your Telegram account.
+1. Click **Start** when the OnCall bot messages you.
+
+If you want to connect manually, you can click the URL provided and then **SEND MESSAGE**. In your Telegram account, click **Start**.
\ No newline at end of file
diff --git a/docs/sources/configure-notifications.md b/docs/sources/configure-notifications.md
new file mode 100644
index 0000000000..b66d672f37
--- /dev/null
+++ b/docs/sources/configure-notifications.md
@@ -0,0 +1,121 @@
++++
+title = "Getting started with Grafana OnCall"
+keywords = ["Grafana Cloud", "Alerts", "Notifications", "on-call"]
+aliases = ["/docs/grafana-cloud/oncall/configure-notifications"]
+draft = true
++++
+
+# Configure users, notifications, and on-call schedules
+
+These procedures introduce you to the configuration of user settings, how to set up escalation policies, and how to use your calendar service for on-call scheduling.
+
+## Before you begin
+
+You must have a Grafana Cloud account and be connected to a data source with alerts configured.
+
+Each supported integration and its associated monitoring system has a slightly different configuration method. These methods are not explained in this guide; however, you can follow the online instructions provided when adding an integration.
+
+## Configure user notification policies
+
+You can configure how each user will receive notifications when they are assigned in escalation policies.
+
+1. Find users.
+
+ Select the **Users** tab and use the browser to search for a user in your organization.
+
+1. Configure user settings.
+
+ Add and verify a phone number and a Slack username if you want to deliver notifications using these mediums.
+
+
+ >**NOTE:** To edit a user's username, email, or role, you must do so in the **Users** tab in the **Configuration** menu of your Grafana instance.
+
+1. Configure notification settings.
+
+ You can configure the notification medium and frequency for each user. **Important Notifications** are specified in escalation steps.
+
+## Connect to integration data sources
+
+You use Grafana OnCall to connect to the monitoring services of your data sources listed in the Grafana OnCall **Integrations** section.
+
+1. Connect to a data source with configured alerts.
+
+ In Grafana OnCall, click on the **Integrations** tab and click **+ New integration for receiving alerts**.
+
+1. Select an integration from the provided options.
+
+ If you want to use an integration that is not listed, you must use webhooks.
+
+1. Configure your integration.
+
+ Each integration has a different method of connecting to Grafana OnCall. For example, if you want to connect to your Grafana data source, select Grafana and follow the instructions.
+
+## Configure escalation policies
+
+You can use **escalation chains** to determine ordered escalation procedures. Configuring escalation chains allows you to set up a chain of incident notification actions that trigger if certain conditions that you specify are not met.
+
+1. Click on the integration tile for which you want to define escalation policies.
+
+ The **Escalations** section for the notification is in the pane to the right of the list of notifications.
+ You can click **Change alert template and grouping** to customize the look of the alert. You can also do this by clicking the **Settings** (gear) icon in the integration tile.
+
+1. Create an escalation chain.
+
+ In the escalation pane, click the **escalate to** menu to choose from previously added escalation chains, or create a new one by clicking **Create a new**. This will be the name of the escalation policy you define.
+
+1. Add escalation steps.
+
+   Click **Add escalation step** to choose from a set of actions and specify their triggering conditions. By default, the first step is to notify a Slack channel or user. Specify users or channels, or toggle the switch to turn this step off.
+
+ To mark an escalation as **Important**, select the option from the step **Start** dropdown menu. User notification policies can be separately defined for **Important** and **Default** escalations.
+
+1. Add a route.
+
+ To add a route, click **Add Route**.
+
+ You can set up a single route and specify notification escalation steps, or you can add multiple routes, each with its own configuration.
+
+ Each route added to an escalation policy follows an `IF`, `ELSE IF`, and `ELSE` path and depends on the type of alert you specify using a regular expression that matches content in the payload body of the alert. You can also specify where to send the notification for each route.
+
+   For example, you can send notifications for alert incidents with `\"severity\": \"critical\"` in the payload to an escalation chain called `Bob_OnCall`. You can create a different route for alerts with the payload `\"namespace\" *: *\"synthetic-monitoring-dev-.*\"` and select an escalation chain called `NotifySecurity`. (See the example payload after this list.)
+
+ You can set up escalation steps for each route in a chain.
+
+ >**NOTE:** When you modify an escalation chain or a route, it will modify that escalation chain across all integrations that use it.
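+
+For instance, the `\"severity\": \"critical\"` expression above would match an alert whose payload contains a fragment like the following. The payload is hypothetical and only illustrates the kind of content the regular expression searches:
+
+```json
+{
+  "labels": {
+    "severity": "critical"
+  },
+  "annotations": {
+    "summary": "Disk is almost full"
+  }
+}
+```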
+
+## Use calendars to configure on-call schedules
+
+You can use any calendar with an iCal address to schedule on-call times for users. During these times, notifications configured in escalation chains with the **Notify users from an on-call schedule** setting will be sent to the person scheduled. You can also schedule multiple users for overlapping times and assign priority labels to indicate which user should be notified.
+
+1. In the **Scheduling** section of Grafana OnCall, click **+ Create schedule**.
+
+1. Give the schedule a name.
+
+1. Create a new calendar in your calendar service and locate the secret iCal URL. For example, in a Google calendar, this URL can be found in **Settings > Settings for my calendars > Integrate calendar**.
+
+1. Copy the secret iCal URL. In OnCall, paste it into the **Primary schedule for iCal URL** field.
+ The permissions you set for the calendar determine who can modify the calendar.
+
+1. Click **Create Schedule**.
+
+1. Schedule on-call times for team members.
+
+   Use the usernames of team members as the event names to schedule their on-call times. You can take advantage of all of the features of your calendar service.
+
+1. (Optional) Create overlapping schedules.
+
+   When you create schedules that overlap, you can prioritize a schedule by adding a level marker. For example, if users AliceGrafana and BobGrafana have overlapping schedules, but BobGrafana is the primary contact, you would name his event `[L1] BobGrafana`. AliceGrafana keeps the default `[L0]` status and does not receive notifications during the overlapping time. You can prioritize up to and including level 9, or `[L9]`. The sketch below shows what such an event looks like in iCal form.
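+
+For reference, a prioritized on-call event exports to iCal roughly as follows. The dates and times are placeholders:
+
+```
+BEGIN:VEVENT
+DTSTART:20220606T090000Z
+DTEND:20220606T170000Z
+SUMMARY:[L1] BobGrafana
+END:VEVENT
+```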
+
+### (Optional) Create an override calendar
+
+You can use an override calendar to allow team members to schedule on-call duties that will override the primary schedule. An override option allows flexibility without modifying the primary schedule. Events scheduled on the override calendar will always override overlapping events on the primary calendar.
+
+1. Create a new calendar using the same calendar service you used to create the primary calendar.
+
+ Be sure to set permissions that allow team members to edit the calendar.
+
+1. In the **Scheduling** section of Grafana OnCall, select the primary calendar you want to override.
+
+1. Click **Edit**.
+
+1. Enter the secret iCal URL in the **Overrides schedule iCal URL** field and click **Update**.
\ No newline at end of file
diff --git a/docs/sources/configure-user-settings.md b/docs/sources/configure-user-settings.md
new file mode 100644
index 0000000000..714c564fd8
--- /dev/null
+++ b/docs/sources/configure-user-settings.md
@@ -0,0 +1,48 @@
++++
+title = "Configure user settings"
+keywords = ["Grafana Cloud", "Alerts", "Notifications", "on-call", "amixr", "oncall", "integrations"]
+weight = 900
++++
+
+# User settings for Grafana OnCall
+
+Grafana OnCall is configured based on the teams you've created on the organization level of your Grafana instance, in **Configuration > Teams**. Administrators can create a different configuration for each team, and can navigate between team configurations in the **Select Team** dropdown menu in the **Incidents** section of Grafana OnCall.
+
+Users can edit their contact information, but user permissions are assigned at the Cloud portal level.
+
+## Configure user notification policies
+
+Administrators can configure how each user will receive notifications when they are scheduled to receive them in escalation chains. Users can verify phone numbers and email addresses.
+
+>**NOTE**: You cannot add users or manage permissions in Grafana OnCall. Most user settings are found on the organizational level of your Grafana instance in **Configuration > Users**.
+
+1. Find users.
+
+ Select the **Users** tab and use the browser to search for a user in the team associated with the OnCall configuration.
+
+1. Configure user settings.
+
+   Add and verify a phone number, a Slack username, and a Telegram account if you want to receive notifications using these methods.
+
+ >**NOTE:** To edit a user's profile username, email, or role, you must do so in the **Users** tab in the **Configuration** menu of your Grafana instance.
+
+1. Configure notification settings.
+
+ Specify the notification medium and frequency for each user. Notification steps will be followed in the order they are listed.
+
+ The settings you specify in **Default Notifications** dictate how a user is notified for most escalation thresholds.
+
+ **Important Notifications** are labeled in escalation chains. If an escalation event is marked as an important notification, it will bypass **Default Notification** settings and notify the user by the method specified.
+
+## Configure Telegram user settings in OnCall
+
+1. In your profile, find the Telegram setting and click **Connect**.
+1. Click **Connect automatically** to have the bot message you and open your Telegram account.
+1. Click **Start** when the OnCall bot messages you.
+
+If you want to connect manually, you can click the URL provided and then **SEND MESSAGE**. In your Telegram account, click **Start**.
+
+## Configure Slack user settings in OnCall
+
+1. In your profile, find the Slack setting and click **Connect**.
+1. Follow the instructions to verify your account.
\ No newline at end of file
diff --git a/docs/sources/escalation-policies/_index.md b/docs/sources/escalation-policies/_index.md
new file mode 100644
index 0000000000..976c9cf3b2
--- /dev/null
+++ b/docs/sources/escalation-policies/_index.md
@@ -0,0 +1,13 @@
++++
+title = "Escalation policies"
+keywords = ["Grafana Cloud", "Alerts", "Notifications", "on-call", "amixr", "oncall", "integrations"]
+weight = 500
++++
+
+# Grafana OnCall escalation policies
+
+Administrators can create escalation policies to automatically send alert group notifications to recipients. These policies define how, where, and when to send notifications.
+
+See the following topics for more information:
+
+{{< section >}}
\ No newline at end of file
diff --git a/docs/sources/escalation-policies/about-escalation-policies.md b/docs/sources/escalation-policies/about-escalation-policies.md
new file mode 100644
index 0000000000..509544e571
--- /dev/null
+++ b/docs/sources/escalation-policies/about-escalation-policies.md
@@ -0,0 +1,22 @@
++++
+title = "About escalation policies"
+keywords = ["Grafana Cloud", "Alerts", "Notifications", "on-call", "amixr", "oncall", "integrations"]
+weight = 100
++++
+
+# About escalation policies
+
+Escalation policies dictate how users and groups are notified when an alert notification is created. They can be very simple or very complex. You can define as many escalation configurations for an integration as you need, and you can send notifications for certain alerts to a designated place when certain conditions are met, or not met.
+
+Escalation policies have three main parts:
+* User settings, where a user sets up their preferred or required notification method.
+* An **escalation chain**, which can have one or more steps that are followed in order when a notification is triggered.
+* A **route**, which allows administrators to manage notifications by flagging expressions in an alert payload.
+
+## Escalation chains
+An escalation chain can have many steps, or only one step. For example, steps can be configured to notify multiple users in some order, notify users that are scheduled for on-call shifts, ping groups in Slack, use outgoing webhooks to integrate with other services, such as JIRA, and do a number of other automated notification tasks.
+
+## Routes
+An escalation workflow can employ **routes** that administrators can configure to filter alerts by regular expressions in their payloads. Notifications for these alerts can be sent to individuals, or they can make use of a new or existing escalation chain.
+
+To learn how to configure escalation chains and routes, see [Configure escalation policies]({{< relref "configure-escalation-policies">}}).
\ No newline at end of file
diff --git a/docs/sources/escalation-policies/configure-escalation-policies.md b/docs/sources/escalation-policies/configure-escalation-policies.md
new file mode 100644
index 0000000000..aa8cc5e8e5
--- /dev/null
+++ b/docs/sources/escalation-policies/configure-escalation-policies.md
@@ -0,0 +1,42 @@
++++
+title = "Configure escalation policies"
+keywords = ["Grafana Cloud", "Alerts", "Notifications", "on-call", "amixr", "oncall", "integrations"]
+weight = 300
++++
+
+# Configure escalation policies
+Set up escalation chains and routes to configure escalation behavior for alert group notifications.
+
+## Configure escalation chains
+You can create and edit escalation chains in two places: within **Integrations**, by clicking on an integration tile, and in **Escalation Chains**. The following steps are for the **Integrations** workflow, but are generally applicable in both situations.
+
+You can use **escalation chains** and **routes** to determine ordered escalation procedures. Escalation chains allow you to set up a series of alert group notification actions that trigger if certain conditions that you specify are met or not met.
+
+1. Click on the integration tile for which you want to define escalation policies.
+
+ The **Escalations** section for the notification is in the pane to the right of the list of notifications.
+ You can click **Change alert template and grouping** to customize the look of the alert. You can also do this by clicking the **Settings** (gear) icon in the integration tile.
+
+1. Create an escalation chain.
+
+   In the escalation pane, click **Escalate to** to choose from previously added escalation chains, or create a new one by clicking **Make a copy** or **Create a new chain**. The name you enter becomes the name of the escalation policy you define.
+
+1. Add escalation steps.
+
+   Click **Add escalation step** to choose from a set of actions and specify their triggering conditions. By default, the first step is to notify a Slack channel or user. Specify users or channels, or toggle the switch to turn this step off.
+
+ To mark an escalation as **Important**, select the option from the step **Start** dropdown menu. User notification policies can be separately defined for **Important** and **Default** escalations.
+
+## Create a route
+
+To add a route, click **Add Route**.
+
+You can set up a single route and specify notification escalation steps, or you can add multiple routes, each with its own configuration.
+
+Each route added to an escalation policy follows an `IF`, `ELSE IF`, or `ELSE` path and depends on the type of alert you specify using a regular expression that matches content in the payload body of the alert. You can also specify where to send the notification for each route.
+
+For example, you can send notifications for alerts with `\"severity\": \"critical\"` in the payload to an escalation chain called `Bob_OnCall`. You can create a different route for alerts with the payload `\"namespace\" *: *\"synthetic-monitoring-dev-.*\"` and select an escalation chain called `NotifySecurity`.
+
+You can set up escalation steps for each route in a chain.
+
+>**NOTE:** When you modify an escalation chain or a route, it will modify that escalation chain across all integrations that use it.
\ No newline at end of file
diff --git a/docs/sources/integrations/_index.md b/docs/sources/integrations/_index.md
new file mode 100644
index 0000000000..f0061636d8
--- /dev/null
+++ b/docs/sources/integrations/_index.md
@@ -0,0 +1,11 @@
++++
+title = "Connect to Grafana OnCall"
+keywords = ["Grafana Cloud", "Alerts", "Notifications", "on-call", "amixr", "oncall", "integrations"]
+weight = 100
+aliases = ["/docs/grafana-cloud/oncall/integrations/"]
++++
+
+# Connect to Grafana OnCall
+There are many alert sources that Grafana OnCall directly supports. Those that aren't listed in the integrations menu in OnCall can be connected using webhooks and configured with custom templates. To configure integrations that are directly supported, follow the instructions provided after selecting a data source in the **Integrations** tab in Grafana OnCall. To review general instructions, and also specific integration instructions for some popular integration options, see the following topics:
+
+{{< section >}}
diff --git a/docs/sources/integrations/add-alertmanager.md b/docs/sources/integrations/add-alertmanager.md
new file mode 100644
index 0000000000..bea29cd82a
--- /dev/null
+++ b/docs/sources/integrations/add-alertmanager.md
@@ -0,0 +1,58 @@
++++
+title = "Configure alert notifications with Alertmanager"
+keywords = ["Grafana Cloud", "Alerts", "Notifications", "on-call", "Alertmanager", "Prometheus"]
+aliases = ["/docs/grafana-cloud/oncall/integrations/add-alertmanager/"]
+weight = 500
++++
+
+# Alertmanager (Prometheus)
+
+The Alertmanager integration handles alerts sent by client applications such as the Prometheus server.
+
+Grafana OnCall provides grouping abilities when processing alerts from Alertmanager, including initial deduplication, grouping, and routing of the alerts.
+
+## Connect Alertmanager to Grafana OnCall
+
+You must have an Admin role to connect to Grafana OnCall.
+
+1. Navigate to the **Integrations** tab in Grafana OnCall.
+
+1. Click on the Alertmanager icon.
+
+1. Follow the instructions that display in the dialog box to find a unique integration URL in the monitoring configuration.
+
+
+
+## Configure Alertmanager
+
+Update the `receivers` section of your Alertmanager configuration to use a unique integration URL:
+```yaml
+route:
+  receiver: 'oncall'
+  group_by: [alertname, datacenter, app]
+
+receivers:
+- name: 'oncall'
+  webhook_configs:
+  - url: # paste your unique integration URL here
+    send_resolved: true
+```
+
+## Configure grouping with Alertmanager and Grafana OnCall
+
+You can use the grouping mechanics of Alertmanager and Grafana OnCall to configure settings for groups of alert notifications.
+
+Alertmanager offers three grouping settings:
+
+- `group_by` provides two options, `instance` or `job`.
+- `group_wait` sets the length of time to initially wait before sending a notification for a particular group of alerts. For example, `group_wait` can be set to 45s.
+
+ Setting a high value for `group_wait` reduces alert noise and minimizes interruption, but it may introduce longer delays in receiving alert notifications. To set an appropriate wait time, consider whether the group of alerts will be the same as those previously sent.
+
+- `group_interval` sets the length of time to wait before sending notifications about new alerts added to a group of alerts for which notifications have already been sent. This setting is usually set to five minutes or more.
+
+  During high alert volume periods, Alertmanager sends alerts at each `group_interval`, which can mean a lot of distraction. Grafana OnCall grouping helps manage this in the following ways:
+
+  - Grafana OnCall groups alerts based on the first label of each alert.
+
+  - Grafana OnCall marks an incident as resolved only when the number of grouped alerts with state `resolved` equals the number of alerts with state `firing`.
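+
+For reference, these grouping timers live in the `route` block of the Alertmanager configuration. A minimal sketch; the values shown are illustrative rather than recommendations:
+
+```yaml
+route:
+  receiver: 'oncall'
+  group_by: [alertname, datacenter, app]
+  group_wait: 45s      # wait before sending the first notification for a new group
+  group_interval: 5m   # wait before notifying about new alerts added to the group
+```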
diff --git a/docs/sources/integrations/add-grafana-alerting.md b/docs/sources/integrations/add-grafana-alerting.md
new file mode 100644
index 0000000000..9962786bd7
--- /dev/null
+++ b/docs/sources/integrations/add-grafana-alerting.md
@@ -0,0 +1,60 @@
++++
+title = "Configure alert notifications with Grafana Alerting"
+keywords = ["Grafana Cloud", "Alerts", "Notifications", "on-call", "Prometheus"]
+aliases = ["/docs/grafana-cloud/oncall/integrations/add-grafana-alerting/"]
+weight = 300
++++
+
+# Connect Grafana Alerting to Grafana OnCall
+
+You must have the Admin role assigned to connect to Grafana OnCall.
+
+1. Navigate to the **Integrations** tab in Grafana OnCall.
+
+1. Click on the Grafana logo.
+
+1. Follow the instructions that display in the dialog box to find a unique integration URL in the monitoring configuration.
+
+## Grafana installations
+
+Grafana OnCall can be set up using two methods:
+
+- Grafana Alerting: Grafana OnCall is connected to the same Grafana instance being used to manage Grafana OnCall.
+
+- Grafana (External): Grafana OnCall is connected to one or more Grafana instances separate from the one being used to manage Grafana OnCall.
+
+### Grafana Cloud Alerting
+Use the following method if you are connecting Grafana OnCall with alerts coming from the same Grafana instance from which Grafana OnCall is being managed.
+
+1. In Grafana OnCall, navigate to the **Integrations** tab and select **New Integration for receiving alerts**.
+
+1. Click **Quick connect** in the **Grafana Alerting** tile. This will automatically create the integration in Grafana OnCall as well as the required contact point in Alerting.
+
+ >**Note:** You must connect the contact point with a notification policy. For more information, see [Contact points in Grafana Alerting](https://grafana.com/docs/grafana/latest/alerting/unified-alerting/contact-points/)
+
+1. Determine the escalation chain for the new integration by either selecting an existing one or by creating a new chain. For more information on creating escalation chains, see [Configure alert notifications with Grafana OnCall]({{< relref "../configure-notifications" >}}).
+
+1. In Grafana Cloud Alerting, navigate to **Alerting > Contact Points** and find a contact point with a name matching the integration you created in Grafana OnCall.
+
+1. Click the **Edit** (pencil) icon, then click **Test**. This will send an alert to Grafana OnCall.
+
+### Grafana (External)
+
+Connect Grafana OnCall with alerts coming from an instance of Grafana different from the one on which Grafana OnCall is being managed:
+1. In Grafana OnCall, navigate to the **Integrations** tab and select **New Integration for receiving alerts**.
+
+1. Select the **Grafana** tile.
+
+1. View and save the URL needed to connect.
+
+1. Determine the escalation chain for the new integration by either selecting an existing one or by creating a new chain. For more information on creating escalation chains, see [Configure alert notifications with Grafana OnCall]({{< relref "../configure-notifications/" >}}).
+
+1. Go to the other Grafana instance to connect to Grafana OnCall and navigate to **Alerting > Contact Points**.
+
+1. Select **New Contact Point**.
+
+1. Choose the contact point type `webhook`, then paste the URL generated in step 3 into the URL field.
+
+ >**Note:** You must connect the contact point with a notification policy. For more information, see [Contact points in Grafana Alerting](https://grafana.com/docs/grafana/latest/alerting/unified-alerting/contact-points/).
+
+1. Click the **Edit** (pencil) icon, then click **Test**. This will send an alert to Grafana OnCall.
\ No newline at end of file
diff --git a/docs/sources/integrations/add-integration.md b/docs/sources/integrations/add-integration.md
new file mode 100644
index 0000000000..cbdbdc98f4
--- /dev/null
+++ b/docs/sources/integrations/add-integration.md
@@ -0,0 +1,22 @@
++++
+title = "Integrate with data sources"
+keywords = ["Grafana Cloud", "Alerts", "Notifications", "on-call", "Alertmanager", "Prometheus"]
+aliases = ["/docs/grafana-cloud/oncall/integrations/add-integration/"]
+weight = 100
++++
+
+# Integrate with data sources
+
+Grafana OnCall can connect directly to the monitoring services of your data sources listed in the Grafana OnCall **Integrations** section.
+
+1. Connect to a data source with configured alerts.
+
+ In Grafana OnCall, click on the **Integrations** tab and click **+ New integration for receiving alerts**.
+
+1. Select an integration from the provided options.
+
+ If you want to use an integration that is not listed, you must use webhooks. To learn more about using webhooks see [Integrate with webhooks]({{< relref "/integrations/webhooks/add-webhook-integration/" >}}).
+
+1. Configure your integration.
+
+ Each integration has a different method of connecting to Grafana OnCall. For example, if you want to connect to your Grafana data source, select Grafana and follow the instructions.
\ No newline at end of file
diff --git a/docs/sources/integrations/add-zabbix.md b/docs/sources/integrations/add-zabbix.md
new file mode 100644
index 0000000000..c22c2437d6
--- /dev/null
+++ b/docs/sources/integrations/add-zabbix.md
@@ -0,0 +1,137 @@
++++
+title = "Configure alert notifications with Zabbix"
+keywords = ["Grafana Cloud", "Alerts", "Notifications", "on-call", "Zabbix"]
+weight = 700
++++
+
+# Connect Zabbix to Grafana OnCall
+
+Zabbix is an open-source monitoring software tool for diverse IT components, including networks, servers, virtual machines, and cloud services. Zabbix provides monitoring for metrics such as network utilization, CPU load, and disk space consumption.
+
+To connect Grafana OnCall with Zabbix using the following procedure, you must have an Admin role assigned.
+
+1. Navigate to the **Integrations** tab in Grafana OnCall.
+
+1. Follow the instructions that display in the dialog box to use a unique integration URL in the monitoring configuration.
+
+
+
+
+## Configure the Zabbix server
+
+1. Deploy a Zabbix playground if you don't have one set up:
+ ```bash
+ docker run --name zabbix-appliance -t \
+ -p 10051:10051 \
+ -p 80:80 \
+ -d zabbix/zabbix-appliance:latest
+ ```
+
+1. Open a shell session on the Zabbix server container.
+
+ ```bash
+ docker exec -it zabbix-appliance bash
+ ```
+
+1. Place the [grafana_oncall.sh](#grafana_oncallsh-script) script in the `AlertScriptsPath` directory specified in the Zabbix server configuration file (`zabbix_server.conf`).
+
+ ```bash
+ grep AlertScriptsPath /etc/zabbix/zabbix_server.conf
+ ```
+   >**Note:** The script must be executable by the user running the zabbix_server binary (usually "zabbix") on the Zabbix server. For example, run `chmod +x grafana_oncall.sh`.
+
+ ``` bash
+ ls -lh /usr/lib/zabbix/alertscripts/grafana_oncall.sh
+ -rw-r--r-- 1 root root 1.5K Jun 6 07:52 /usr/lib/zabbix/alertscripts/grafana_oncall.sh
+ ```
+
+## Configure Zabbix alerts
+Within the Zabbix web interface, do the following:
+
+1. In a browser, open `localhost:80`.
+
+1. Navigate to **Administration > Media Types > Create Media Type**.
+
+
+1. Create a Media Type with the following fields.
+ * Name: Grafana OnCall
+ * Type: script
+ * Script parameters:
+ * {ALERT.SENDTO}
+ * {ALERT.SUBJECT}
+ * {ALERT.MESSAGE}
+
+
+
+### Set the {ALERT.SENDTO} value
+To send alerts to Grafana OnCall, the {ALERT.SENDTO} value must be set in the [user media configuration](https://www.zabbix.com/documentation/3.4/manual/config/notifications/media/script#user_media).
+
+1. In the web UI, navigate to **Administration > Users** and open the **user properties** form.
+
+1. In the **Media** tab, click **Add** and paste the link from Grafana OnCall into the `Send to` field.
+
+
+1. Click **Test** in the last column to send a test alert to Grafana OnCall.
+
+
+1. In the testing window that opens, set **Send to** to the unique integration URL from the previous step.
+Create a test message with a body and an optional subject, then click **Test**.
+
+
+## Grouping and auto-resolve of Zabbix notifications
+Grafana OnCall provides grouping and auto-resolve of Zabbix notifications.
+Use the following procedure to configure grouping and auto-resolve.
+
+1. Provide Grafana OnCall with a parameter that serves as an identifier for group differentiation.
+
+1. Append that variable to the subject of the action as `ONCALL_GROUP: ID`, where `ID` is any of the Zabbix [macros](https://www.zabbix.com/documentation/4.2/manual/appendix/macros/supported_by_location).
+For example, `{EVENT.ID}`. The [grafana_oncall.sh](#grafana_oncallsh-script) script extracts this identifier and passes it to Grafana OnCall as the `alert_uid`.
+
+1. To enable auto-resolve within Grafana OnCall, the "Resolved" keyword is required in the **Default subject** field in **Recovered operations**.
+
+
+
+## grafana_oncall.sh script
+```bash
+#!/bin/bash
+# This is a modification of ericos's original shell script.
+
+# Get the url ($1), subject ($2), and message ($3)
+url="$1"
+subject="${2//$'\r\n'/'\n'}"
+message="${3//$'\r\n'/'\n'}"
+
+# Alert state depending on the subject, indicating whether the trigger is going into problem state or recovering
+recoversub='^RECOVER(Y|ED)?$|^OK$|^Resolved.*'
+
+if [[ "$subject" =~ $recoversub ]]; then
+ state='ok'
+else
+ state='alerting'
+fi
+
+payload='{
+ "title": "'${subject}'",
+ "state": "'${state}'",
+ "message": "'${message}'"
+}'
+
+# Alert group identifier from the subject of action. Grouping will not work without ONCALL_GROUP in the action subject
+regex='ONCALL_GROUP: ([a-zA-Z0-9_\"]*)'
+if [[ "$subject" =~ $regex ]]; then
+ alert_uid=${BASH_REMATCH[1]}
+ payload='{
+ "alert_uid": "'${alert_uid}'",
+ "title": "'${subject}'",
+ "state": "'${state}'",
+ "message": "'${message}'"
+ }'
+fi
+
+response=$(curl "$url" -d "${payload}" -H "Content-Type: application/json" -X POST)
+```
+
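+To check the script before wiring it into Zabbix, you can invoke it by hand. This is a sketch; the URL, token, host name, and group ID are placeholders for your own values:
+
+```bash
+# Hypothetical manual test: url ($1), subject ($2), message ($3)
+./grafana_oncall.sh \
+  "https://<your-oncall-url>/integrations/v1/formatted_webhook/<token>/" \
+  "PROBLEM: High CPU ONCALL_GROUP: 12345" \
+  "CPU usage is above 90% on host db-01"
+```
+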
+## More information
+For more information on Zabbix scripts, see [scripts for notifications](https://www.zabbix.com/documentation/4.2/manual/config/notifications/media/script).
\ No newline at end of file
diff --git a/docs/sources/integrations/webhooks/_index.md b/docs/sources/integrations/webhooks/_index.md
new file mode 100644
index 0000000000..026458a8d6
--- /dev/null
+++ b/docs/sources/integrations/webhooks/_index.md
@@ -0,0 +1,12 @@
++++
+title = "Use webhooks to send and receive alerts"
+keywords = ["Grafana Cloud", "Alerts", "Notifications", "on-call", "Alertmanager", "Prometheus"]
+weight = 900
++++
+
+# Use webhooks to send and receive alerts
+You can use webhooks to send alert group notifications, and also to receive alerts when the data source for the alerts is not directly supported by Grafana OnCall. You can also use custom templates to format your alerts.
+
+Follow these links to learn more about using webhooks for OnCall alert notifications:
+
+{{< section >}}
\ No newline at end of file
diff --git a/docs/sources/integrations/webhooks/add-webhook-integration.md b/docs/sources/integrations/webhooks/add-webhook-integration.md
new file mode 100644
index 0000000000..b6de74d470
--- /dev/null
+++ b/docs/sources/integrations/webhooks/add-webhook-integration.md
@@ -0,0 +1,39 @@
++++
+title = "Webhook integration"
+keywords = ["Grafana Cloud", "Alerts", "Notifications", "on-call", "Alertmanager", "Prometheus"]
+weight = 100
++++
+
+# Integrate with your data source using webhooks
+
+Grafana OnCall directly supports integrations from many data sources, but you can connect to any data source that isn't listed in the **Create Integration** page by using webhooks.
+
+1. In **Integrations**, click **+ New integration for receiving alerts**.
+1. Select a webhook format.
+   There are two available formats: **Webhook** and **Formatted Webhook**.
+   * **Webhook** delivers the raw JSON payload exactly as it is received.
+   * **Formatted Webhook** can be used if the body of the alerts sent by your monitoring service is formatted in a way that OnCall can read. The following fields are recognized, but none are required:
+     * `alert_uid`: a unique alert ID for grouping.
+     * `title`: a title.
+     * `image_url`: a URL for an image attached to the alert.
+     * `state`: either `ok` or `alerting`. Helpful for auto-resolving.
+     * `link_to_upstream_details`: a link back to your monitoring system.
+     * `message`: alert details.
+
+ To learn how to use custom alert templates for formatted webhooks, see [Configure custom alert templates]({{< relref "../create-custom-templates/" >}}).
+
+1. Use the unique webhook URL for requests. For example:
+
+   ```shell
+ curl -X POST \
+ https://a-prod-us-central-0.grafana.net/integrations/v1/formatted_webhook/m12xmIjOcgwH74UF8CN4dk0Dh/ \
+   -H 'Content-Type: application/json' \
+ -d '{
+ "alert_uid": "08d6891a-835c-e661-39fa-96b6a9e26552",
+ "title": "The whole system is down",
+ "image_url": "https://upload.wikimedia.org/wikipedia/commons/e/ee/Grumpy_Cat_by_Gage_Skidmore.jpg",
+ "state": "alerting",
+ "link_to_upstream_details": "https://en.wikipedia.org/wiki/Downtime",
+ "message": "Smth happened. Oh no!"
+ }'
+ ```
\ No newline at end of file
diff --git a/docs/sources/integrations/webhooks/configure-outgoing-webhooks.md b/docs/sources/integrations/webhooks/configure-outgoing-webhooks.md
new file mode 100644
index 0000000000..396ca4cd5a
--- /dev/null
+++ b/docs/sources/integrations/webhooks/configure-outgoing-webhooks.md
@@ -0,0 +1,39 @@
++++
+title = "Send alert notifications by webhook"
+keywords = ["Grafana Cloud", "Alerts", "Notifications", "on-call", "amixr", "webhooks"]
+weight = 500
++++
+
+# Send alert group notifications by webhook
+You can configure outgoing webhooks to send alerts to a destination of your choice. Once a webhook is created, you can choose it as a notification method in escalation steps.
+
+1. In Grafana OnCall, navigate to **Outgoing Webhooks** and click **+ Create**.
+ This is also the place to edit and delete existing webhooks.
+
+1. Name your webhook and enter the destination URL.
+
+1. If the destination requires authentication, enter your credentials.
+ You can enter a username and password (HTTP) or an authorization header formatted in JSON.
+
+1. Configure the webhook payload in the **Data** field.
+   The **Data** field can use the following four variables to auto-populate the webhook payload with information about the first alert in the alert group:
+ - `{{ alert_title }}`
+ - `{{ alert_message }}`
+ - `{{ alert_url }}`
+ - `{{ alert_payload }}`
+
+   `alert_payload` is always the first level of any variable you want to reference. The way you reference nested fields must match the structure of your alert payload.
+
+ The following is an example of an entry in the **Data** field that might return an alert name and description.
+
+ ```json
+ {
+ "name": "{{ alert_payload.labels.alertname }}",
+ "message": "{{ alert_payload.annotations.description }}"
+ }
+ ```
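+
+   For the example above to produce values, the first alert in the group would need a payload shaped roughly like the following. This is a hypothetical Alertmanager-style alert, shown only to illustrate the nesting:
+
+   ```json
+   {
+     "labels": {
+       "alertname": "InstanceDown"
+     },
+     "annotations": {
+       "description": "Instance db-01 has been down for more than 5 minutes."
+     }
+   }
+   ```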
+
+ >**NOTE:** If you get an error message and cannot create a webhook, make sure your JSON is formatted correctly.
+
+1. Click **Create Webhook**.
\ No newline at end of file
diff --git a/docs/sources/integrations/webhooks/create-custom-templates.md b/docs/sources/integrations/webhooks/create-custom-templates.md
new file mode 100644
index 0000000000..631119d3dd
--- /dev/null
+++ b/docs/sources/integrations/webhooks/create-custom-templates.md
@@ -0,0 +1,149 @@
++++
+title = "Format alerts with templates"
+keywords = ["Grafana Cloud", "Alerts", "Notifications", "on-call", "Jinja"]
+weight = 300
++++
+
+# Format alerts with templates
+
+Grafana OnCall works with over one thousand alert monitoring systems. Almost any monitoring system can send alerts using webhooks with JSON payloads.
+
+By default, webhooks deliver raw JSON. To make the payload more human-readable, you can map your alert fields to the fields that OnCall recognizes. You can use Jinja templates for more advanced customization.
+
+## JSON alerting object
+
+Alerts received by Grafana OnCall contain metadata as keys and values in a JSON object. The following is an example of an alert from Grafana:
+```json
+{
+ "dashboardId":1,
+ "title":"[Alerting] Panel Title alert",
+ "message":"Notification Message",
+ "evalMatches":[
+ {
+ "value":1,
+ "metric":"Count",
+ "tags":{}
+ }
+ ],
+ "imageUrl":"https://grafana.com/static/assets/img/blog/mixed_styles.png",
+ "orgId":1,
+ "panelId":2,
+ "ruleId":1,
+ "ruleName":"Panel Title alert",
+ "ruleUrl":"http://localhost:3000/d/hZ7BuVbWz/test-dashboard?fullscreen\u0026edit\u0026tab=alert\u0026panelId=2\u0026orgId=1",
+ "state":"alerting",
+ "tags":{
+ "tag name":"tag value"
+ }
+}
+```
+
+## The alert payload
+Once an alert is received by Grafana OnCall, the following occurs, based on the alert content:
+
+1. The most useful information is shown in a readable format.
+
+1. Noise is minimized by grouping alerts, combining similar alerts into a single page.
+
+1. The alert group is resolved if the monitoring system tells Grafana OnCall to do so.
+
+In Grafana OnCall, every alert and alert group has the following fields:
+- `Title`, `Message`, and `Image Url`
+- `Grouping Id`
+- `Resolve Signal`
+
+Keys from the JSON payload are mapped to these fields. For example:
+* `{{ payload.title }}` -> Title
+* `{{ payload.message }}` -> Message
+* `{{ payload.imageUrl }}` -> Image Url
+
+The result is that each field of the alert in OnCall is mapped to the JSON payload keys. This is also true for the alert behavior:
+* `{{ payload.ruleId }}` -> Grouping Id
+* `{{ 1 if payload.state == 'OK' else 0 }}` -> Resolve Signal
+
+
+OnCall has default Jinja templates for the most popular monitoring systems.
+
+If your monitoring system is not in the Grafana OnCall integrations list, you can create the most generic integration, `Webhook`, send an alert, and write your own templates.
+
+As a best practice, add *Playbooks*, *Useful links*, or *Checklists* to the alert message.
+
+
+## How to customize templates
+
+You can customize the default templates in Grafana OnCall by opening the **Settings** window in either the **Integrations** or **Alert Groups** tab:
+
+1. From the **Integrations** tab, select the integration, then click the **Settings** (gear) icon.
+
+
+
+1. From the **Alert Groups** tab, click **Edit rendering, grouping, and other templates**.
+
+
+
+1. In **Settings**, select the template to edit from **Edit template for**.
+
+1. Edit the appearance templates as needed:
+ * `Title`, `Message`, `Image url` for Web
+ * `Title`, `Message`, `Image url` for Slack
+ * `Title` used in SMS
+ * `Title` used in Phone
+ * `Title`, `Message` used in Email
+
+1. Edit the alert behavior as needed:
+   * `Grouping Id` - Alerts whose templates render the same output are combined into a single alert group.
+ * `Acknowledge Condition` - The output should be `ok`, `true`, or `1` to auto-acknowledge the alert group. For example, `{{ 1 if payload.state == 'OK' else 0 }}`.
+ * `Resolve Condition` - The output should be `ok`, `true` or `1` to auto-resolve the alert group. For example, `{{ 1 if payload.state == 'OK' else 0 }}`.
+ * `Source Link` - Used to customize the URL link to provide as the "source" of the alert.
+
+## Advanced Jinja templates
+Grafana OnCall uses the [Jinja templating language](http://jinja.pocoo.org/docs/2.10/) to format alert groups for the Web, Slack, phone calls, SMS messages, and more, because raw JSON is not easily readable by humans. As a result, you can decide what you want to see when an alert group is triggered, as well as how it should be presented.
+
+Jinja2 offers simple but multi-faceted functionality by using loops, conditions, functions, and more.
+
+> **NOTE:** Every alert from a monitoring system comes in the key/value format.
+> Grafana OnCall has rules about which of the keys map to `title`, `message`, `image`, `grouping`, and `auto-resolve`.
+
+### Loops
+
+Monitoring systems can send an array of values. In the following example, Jinja iterates over the `evalMatches` array from a Grafana alert and formats each entry:
+```.jinja2
+*Values:*
+ {% for evalMatch in payload.evalMatches -%}
+ `{{ evalMatch['metric'] }}: '{{ evalMatch['value'] -}}'`{{ " " }}
+ {%- endfor %}
+```
+
+### Conditions
+You can add instructions if an alert comes from a specified Grafana alert rule:
+
+````jinja2
+{% if payload.ruleId == '1' -%}
+*Alert TODOs*
+1. Get access to the container
+ ```
+ kubectl port-forward service/example 3000:80
+ ```
+2. Check for the exception.
+3. Open the container and reload caches.
+4. Click Custom Button `Send to Jira`
+{%- endif -%}
+````
+
+### Built-in Jinja functions
+
+Jinja2 includes built-in functions that can also be used in Grafana OnCall. For example:
+```.jinja2
+{{ payload | tojson_pretty }}
+```
+Built-in functions:
+* `abs`
+* `capitalize`
+* `trim`
+* See the full list of Jinja built-in functions on [GitHub](https://github.com/pallets/jinja/blob/3915eb5c2a7e2e4d49ebdf0ecb167ea9c21c60b2/src/jinja2/filters.py#L1307).
+
+### Functions added by Grafana OnCall
+* `time` - current time
+* `tojson_pretty` - JSON prettified
+* `iso8601_to_time` - converts time from iso8601 (`2015-02-17T18:30:20.000Z`) to datetime
+* `datetimeformat` - converts time from datetime to the given format (`%H:%M / %d-%m-%Y` by default)
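+
+For example, a sketch that chains two of these helpers. It assumes the payload carries an ISO 8601 timestamp in a field named `startsAt`, which is an illustrative field name rather than a guaranteed one:
+```.jinja2
+Alert started at {{ payload.startsAt | iso8601_to_time | datetimeformat }}
+```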
diff --git a/docs/sources/manage-alert-groups.md b/docs/sources/manage-alert-groups.md
new file mode 100644
index 0000000000..78cb40a799
--- /dev/null
+++ b/docs/sources/manage-alert-groups.md
@@ -0,0 +1,14 @@
++++
+title = "Manage alert groups"
+description = ""
+keywords = ["Grafana", "oncall", "on-call", "calendar", "incidents", "alert groups"]
+weight = 300
++++
+
+# Manage alert groups
+
+When you create a new alert integration, alerts are sent from the alert monitoring service of that source to Grafana OnCall. When the first alert is sent, the escalation policies you have in place for that integration determine when and where notifications are sent. Alerts will continue to gather until resolved, forming an alert group. For example, if Juan, an administrator, silences a firing alert group, the alerts will continue to collect in that group until the status is **resolved**. Once this occurs, a new alert will begin the next alert group.
+
+In the **Alert Groups** tab, you can view alert groups by status. Groups are named after the first alert that fired. When you click on a group, you can view information on all of the alerts that have fired, the source of the alerts, and the users assigned in the escalation chain associated with the group. You can also view the timeline of the group, which shows all of the actions associated with the configured escalation policies, as well as resolution notes.
+
+Administrators can change the status of individual alert groups, or can select multiple groups to edit at once. Alert group status can be changed in the following ways: `acknowledge`, `resolve`, `unresolve`, `restart`, and `silence`.
\ No newline at end of file
diff --git a/docs/sources/oncall-api-reference/_index.md b/docs/sources/oncall-api-reference/_index.md
new file mode 100644
index 0000000000..2fee042bbc
--- /dev/null
+++ b/docs/sources/oncall-api-reference/_index.md
@@ -0,0 +1,63 @@
++++
+title = "Grafana OnCall HTTP API reference"
+weight = 1300
++++
+
+# HTTP API Reference
+
+Use the following guidelines for the Grafana OnCall API.
+
+
+
+## Authentication
+
+To authorize, use the **Authorization** header:
+
+```shell
+# With shell, you can just pass the correct header with each request
+curl "api_endpoint_here" --header "Authorization: meowmeowmeow"
+```
+
+Note that `meowmeowmeow` is a placeholder key used throughout these examples.
+Replace it with your API key in production.
+
+Grafana OnCall uses API keys to allow access to the API. You can request a new OnCall API key in the API section.
+
+An API key is specific to a user and a Grafana stack. If you want to switch to a different team configuration, request a different API key.
+
+## Pagination
+
+List endpoints such as List Integrations or List Alert Groups return multiple objects.
+
+The OnCall API returns them in pages. Note that the page size may vary.
+
+| Parameter | Meaning |
+|-----------|:-------:|
+`count` | The total number of items. It can be `0` if a request does not return any data.
+`next` | A link to the next page. It can be `null` if the next page does not contain any data.
+`previous` | A link to the previous page. It can be `null` if the previous page does not contain any data.
+`results` | The data list. Can be `[]` if a request does not return any data.
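+
+For example, a paginated list response has the following shape. This sketch assumes a `page` query parameter in the `next` and `previous` links; the exact link format may differ:
+
+```json
+{
+  "count": 12,
+  "next": "{{API_URL}}/api/v1/alert_groups/?page=3",
+  "previous": "{{API_URL}}/api/v1/alert_groups/?page=1",
+  "results": ["..."]
+}
+```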
+
+## Rate Limits
+
+Grafana OnCall enforces rate limits to ensure that alert group notifications are delivered to your Slack workspace even when some integrations produce a large number of alerts.
+
+### Monitoring integrations Rate Limits
+Rate limited response HTTP status: 429
+
+
+| Scope | Amount | Time Frame |
+|------------------------------|:------:|:----------:|
+| Alerts from each integration | 300 | 5 minutes |
+| Alerts from the whole team | 500 | 5 minutes |
+
+## API rate limits
+Rate limits may be reduced or increased depending on platform status.
+
+| Scope | Amount | Time Frame |
+|--------------------------|:------:|:--------:|
+| API requests per API key | 300 | 5 minutes |
\ No newline at end of file
diff --git a/docs/sources/oncall-api-reference/alertgroups.md b/docs/sources/oncall-api-reference/alertgroups.md
new file mode 100644
index 0000000000..4094f0b379
--- /dev/null
+++ b/docs/sources/oncall-api-reference/alertgroups.md
@@ -0,0 +1,68 @@
++++
+title = "Alert groups HTTP API"
+aliases = ["/docs/grafana-cloud/oncall/oncall-api-reference/alertgroups/"]
+weight = 400
++++
+
+# List alert groups
+
+```shell
+curl "{{API_URL}}/api/v1/alert_groups/" \
+ --request GET \
+ --header "Authorization: meowmeowmeow" \
+ --header "Content-Type: application/json"
+```
+
+The above command returns JSON structured in the following way:
+
+```json
+{
+ "count": 1,
+ "next": null,
+ "previous": null,
+ "results": [
+ {
+ "id": "I68T24C13IFW1",
+ "integration_id": "CFRPV98RPR1U8",
+ "route_id": "RIYGUJXCPFHXY",
+ "alerts_count": 3,
+ "state": "resolved",
+ "created_at": "2020-05-19T12:37:01.430444Z",
+ "resolved_at": "2020-05-19T13:37:01.429805Z",
+ "acknowledged_at": null,
+ "title": "Memory above 90% threshold"
+ }
+ ]
+}
+```
+
+The following filter parameters can be provided as `GET` arguments:
+
+* `route_id`
+* `integration_id`
+
+**HTTP request**
+
+`GET {{API_URL}}/api/v1/alert_groups/`
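+
+For example, to list only the alert groups for a single integration, reusing the integration ID from the example response above:
+
+```shell
+curl "{{API_URL}}/api/v1/alert_groups/?integration_id=CFRPV98RPR1U8" \
+  --request GET \
+  --header "Authorization: meowmeowmeow" \
+  --header "Content-Type: application/json"
+```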
+
+# Delete alert groups
+
+```shell
+curl "{{API_URL}}/api/v1/alert_groups/I68T24C13IFW1/" \
+ --request DELETE \
+ --header "Authorization: meowmeowmeow" \
+ --header "Content-Type: application/json" \
+ --data '{
+ "mode": "wipe"
+ }'
+```
+
+|Parameter | Required | Description |
+|--------- |:--------:|:------------|
+`mode` | No | Default setting is `wipe`. `wipe` removes the payload of all alerts in the alert group, which is useful if you sent sensitive data to OnCall; all metadata remains. `delete` triggers removal of the alert group, its alerts, and all related metadata, and also removes alert group notifications in Slack and other destinations.
+
+>**NOTE:** `delete` can take a few moments to complete because Grafana OnCall interacts with third-party APIs such as Slack. Check the objects using `GET` to be sure the data is removed.
+
+**HTTP request**
+
+`DELETE {{API_URL}}/api/v1/alert_groups/`
\ No newline at end of file
diff --git a/docs/sources/oncall-api-reference/alerts.md b/docs/sources/oncall-api-reference/alerts.md
new file mode 100644
index 0000000000..f0c65a3839
--- /dev/null
+++ b/docs/sources/oncall-api-reference/alerts.md
@@ -0,0 +1,110 @@
++++
+title = "Alerts HTTP API"
+aliases = ["/docs/grafana-cloud/oncall/oncall-api-reference/alerts"]
+weight = 100
++++
+
+# List Alerts
+
+```shell
+curl "{{API_URL}}/api/v1/alerts/" \
+ --request GET \
+ --header "Authorization: meowmeowmeow" \
+ --header "Content-Type: application/json"
+```
+
+The above command returns JSON structured in the following way:
+
+```json
+{
+ "count": 3,
+ "next": null,
+ "previous": null,
+ "results": [
+ {
+ "id": "AA74DN7T4JQB6",
+ "alert_group_id": "I68T24C13IFW1",
+ "created_at": "2020-05-11T20:07:43Z",
+ "payload": {
+ "state": "alerting",
+ "title": "[Alerting] Test notification",
+ "ruleId": 0,
+ "message": "Someone is testing the alert notification within Grafana.",
+ "ruleUrl": "{{API_URL}}/",
+ "ruleName": "Test notification",
+ "evalMatches": [
+ {
+ "tags": null,
+ "value": 100,
+ "metric": "High value"
+ },
+ {
+ "tags": null,
+ "value": 200,
+ "metric": "Higher Value"
+ }
+ ]
+ }
+ },
+ {
+ "id": "AR9SSYFKE2PV7",
+ "alert_group_id": "I68T24C13IFW1",
+ "created_at": "2020-05-11T20:07:54Z",
+ "payload": {
+ "state": "alerting",
+ "title": "[Alerting] Test notification",
+ "ruleId": 0,
+ "message": "Someone is testing the alert notification within Grafana.",
+ "ruleUrl": "{{API_URL}}/",
+ "ruleName": "Test notification",
+ "evalMatches": [
+ {
+ "tags": null,
+ "value": 100,
+ "metric": "High value"
+ },
+ {
+ "tags": null,
+ "value": 200,
+ "metric": "Higher Value"
+ }
+ ]
+ }
+ },
+ {
+ "id": "AWJQSGEYYUFGH",
+ "alert_group_id": "I68T24C13IFW1",
+ "created_at": "2020-05-11T20:07:58Z",
+ "payload": {
+ "state": "alerting",
+ "title": "[Alerting] Test notification",
+ "ruleId": 0,
+ "message": "Someone is testing the alert notification within Grafana.",
+ "ruleUrl": "{{API_URL}}/",
+ "ruleName": "Test notification",
+ "evalMatches": [
+ {
+ "tags": null,
+ "value": 100,
+ "metric": "High value"
+ },
+ {
+ "tags": null,
+ "value": 200,
+ "metric": "Higher Value"
+ }
+ ]
+ }
+ }
+ ]
+}
+```
+
+The following filter parameters can be provided as `GET` arguments:
+
+* `alert_group_id`
+* `search`: a string-based inclusion search on the alert payload
+
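+For example, to list the alerts of one alert group and search their payloads, reusing the group ID from the example response above:
+
+```shell
+curl "{{API_URL}}/api/v1/alerts/?alert_group_id=I68T24C13IFW1&search=notification" \
+  --request GET \
+  --header "Authorization: meowmeowmeow" \
+  --header "Content-Type: application/json"
+```
+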
+**HTTP request**
+
+`GET {{API_URL}}/api/v1/alerts/`
\ No newline at end of file
diff --git a/docs/sources/oncall-api-reference/escalation_chains.md b/docs/sources/oncall-api-reference/escalation_chains.md
new file mode 100644
index 0000000000..e11d59dcba
--- /dev/null
+++ b/docs/sources/oncall-api-reference/escalation_chains.md
@@ -0,0 +1,102 @@
++++
+title = "Escalation Chains HTTP API"
+aliases = ["/docs/grafana-cloud/oncall/oncall-api-reference/escalation_chains"]
+weight = 200
++++
+
+# Create an escalation chain
+
+```shell
+curl "{{API_URL}}/api/v1/escalation_chains/" \
+ --request POST \
+ --header "Authorization: meowmeowmeow" \
+ --header "Content-Type: application/json" \
+ --data '{
+ "name": "example-chain"
+ }'
+```
+
+The above command returns JSON structured in the following way:
+
+```json
+{
+ "id": "FWDL7M6N6I9HE",
+ "name": "example-chain",
+ "team_id": null
+}
+```
+
+| Parameter | Required | Description |
+|-----------|:--------:|:------------|
+| name | yes | Name of the escalation chain |
+| team_id | no | ID of the team |
+
+**HTTP request**
+
+`POST {{API_URL}}/api/v1/escalation_chains/`
+
+# Get an escalation chain
+
+```shell
+curl "{{API_URL}}/api/v1/escalation_chains/F5JU6KJET33FE/" \
+ --request GET \
+ --header "Authorization: meowmeowmeow" \
+ --header "Content-Type: application/json"
+```
+
+The above command returns JSON structured in the following way:
+
+```json
+{
+ "id": "F5JU6KJET33FE",
+ "name": "default",
+ "team_id": null
+}
+```
+
+**HTTP request**
+
+`GET {{API_URL}}/api/v1/escalation_chains//`
+
+# List escalation chains
+
+```shell
+curl "{{API_URL}}/api/v1/escalation_chains/" \
+ --request GET \
+ --header "Authorization: meowmeowmeow" \
+ --header "Content-Type: application/json"
+```
+
+The above command returns JSON structured in the following way:
+
+```json
+{
+ "count": 2,
+ "next": null,
+ "previous": null,
+ "results": [
+ {
+ "id": "F5JU6KJET33FE",
+ "name": "default",
+ "team_id": null
+ }
+ ]
+}
+```
+
+**HTTP request**
+
+`GET {{API_URL}}/api/v1/escalation_chains/`
+
+# Delete an escalation chain
+
+```shell
+curl "{{API_URL}}/api/v1/escalation_chains/F5JU6KJET33FE/" \
+ --request DELETE \
+ --header "Authorization: meowmeowmeow" \
+ --header "Content-Type: application/json"
+```
+
+**HTTP request**
+
+`DELETE {{API_URL}}/api/v1/escalation_chains//`
\ No newline at end of file
diff --git a/docs/sources/oncall-api-reference/escalation_policies.md b/docs/sources/oncall-api-reference/escalation_policies.md
new file mode 100644
index 0000000000..c2ee1ffb3d
--- /dev/null
+++ b/docs/sources/oncall-api-reference/escalation_policies.md
@@ -0,0 +1,133 @@
++++
+title = "Escalation Policies HTTP API"
+aliases = ["/docs/grafana-cloud/oncall/oncall-api-reference/escalation_policies"]
+weight = 300
++++
+
+# Create an escalation policy
+
+```shell
+curl "{{API_URL}}/api/v1/escalation_policies/" \
+ --request POST \
+ --header "Authorization: meowmeowmeow" \
+ --header "Content-Type: application/json" \
+ --data '{
+ "escalation_chain_id": "F5JU6KJET33FE",
+ "type": "wait",
+ "duration": 60
+ }'
+```
+
+The above command returns JSON structured in the following way:
+
+```json
+{
+ "id": "E3GA6SJETWWJS",
+ "escalation_chain_id": "F5JU6KJET33FE",
+ "position": 0,
+ "type": "wait",
+ "duration": 60
+}
+```
+
+|Parameter | Required | Description |
+|----------|:--------:|:------------|
+`escalation_chain_id` | Yes | Each escalation policy is assigned to a specific escalation chain.
+`position` | Optional | Escalation policies execute one after another, starting from `position=0`. `position=-1` will put the escalation policy at the end of the list. A new escalation policy created with the position of an existing escalation policy will move the existing one (and all that follow it) down the list.
+`type` | Yes | One of: `wait`, `notify_persons`, `notify_person_next_each_time`, `notify_on_call_from_schedule`, `notify_user_group`, `trigger_action`, `resolve`, `notify_whole_channel`, `notify_if_time_from_to`.
+`duration` | Optional | The duration, in seconds, when type `wait` is chosen.
+`important` | Optional | Default is `false`. Will assign "important" to personal notification rules if `true`. This can be used to distinguish alerts on which you want to be notified immediately by phone. Applicable for types `notify_persons`, `notify_on_call_from_schedule`, and `notify_user_group`.
+`action_to_trigger` | If type = `trigger_action` | ID of an action, or webhook.
+`group_to_notify` | If type = `notify_user_group` | ID of a `User Group`.
+`persons_to_notify` | If type = `notify_persons` | List of user IDs.
+`persons_to_notify_next_each_time` | If type = `notify_person_next_each_time` | List of user IDs.
+`notify_on_call_from_schedule` | If type = `notify_on_call_from_schedule` | ID of a Schedule.
+`notify_if_time_from` | If type = `notify_if_time_from_to` | UTC time represents the beginning of the time period, for example `09:00:00Z`.
+`notify_if_time_to` | If type = `notify_if_time_from_to` | UTC time represents the end of the time period, for example `18:00:00Z`.
+
+**HTTP request**
+
+`POST {{API_URL}}/api/v1/escalation_policies/`
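+
+For a notification step rather than a wait, the following sketch uses fields from the table above; the IDs reuse the examples elsewhere in this document:
+
+```shell
+curl "{{API_URL}}/api/v1/escalation_policies/" \
+  --request POST \
+  --header "Authorization: meowmeowmeow" \
+  --header "Content-Type: application/json" \
+  --data '{
+      "escalation_chain_id": "F5JU6KJET33FE",
+      "type": "notify_persons",
+      "persons_to_notify": ["U4DNY931HHJS5"],
+      "important": true
+  }'
+```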
+
+# Get an escalation policy
+
+```shell
+curl "{{API_URL}}/api/v1/escalation_policies/E3GA6SJETWWJS/" \
+ --request GET \
+ --header "Authorization: meowmeowmeow" \
+  --header "Content-Type: application/json"
+```
+
+The above command returns JSON structured in the following way:
+
+```json
+{
+ "id": "E3GA6SJETWWJS",
+ "escalation_chain_id": "F5JU6KJET33FE",
+ "position": 0,
+ "type": "wait",
+ "duration": 60
+}
+```
+
+**HTTP request**
+
+`GET {{API_URL}}/api/v1/escalation_policies//`
+
+# List escalation policies
+
+```shell
+curl "{{API_URL}}/api/v1/escalation_policies/" \
+ --request GET \
+ --header "Authorization: meowmeowmeow" \
+ --header "Content-Type: application/json"
+```
+
+The above command returns JSON structured in the following way:
+
+```json
+{
+ "count": 2,
+ "next": null,
+ "previous": null,
+ "results": [
+ {
+ "id": "E3GA6SJETWWJS",
+ "escalation_chain_id": "F5JU6KJET33FE",
+ "position": 0,
+ "type": "wait",
+ "duration": 60
+ },
+ {
+ "id": "E5JJTU52M5YM4",
+ "escalation_chain_id": "F5JU6KJET33FE",
+ "position": 1,
+ "type": "notify_person_next_each_time",
+ "persons_to_notify_next_each_time": [
+ "U4DNY931HHJS5"
+ ]
+ }
+ ]
+}
+```
+
+The following filter parameter can be provided as a `GET` argument:
+
+* `escalation_chain_id`
+
+**HTTP request**
+
+`GET {{API_URL}}/api/v1/escalation_policies/`
+
+# Delete an escalation policy
+
+```shell
+curl "{{API_URL}}/api/v1/escalation_policies/E3GA6SJETWWJS/" \
+ --request DELETE \
+ --header "Authorization: meowmeowmeow" \
+ --header "Content-Type: application/json"
+```
+
+**HTTP request**
+
+`DELETE {{API_URL}}/api/v1/escalation_policies//`
\ No newline at end of file
diff --git a/docs/sources/oncall-api-reference/integrations.md b/docs/sources/oncall-api-reference/integrations.md
new file mode 100644
index 0000000000..4e5ed02b1d
--- /dev/null
+++ b/docs/sources/oncall-api-reference/integrations.md
@@ -0,0 +1,288 @@
++++
+title = "Integrations HTTP API"
+aliases = ["/docs/grafana-cloud/oncall/oncall-api-reference/integrations/"]
+weight = 500
++++
+
+# Create an integration
+
+```shell
+curl "{{API_URL}}/api/v1/integrations/" \
+ --request POST \
+ --header "Authorization: meowmeowmeow" \
+ --header "Content-Type: application/json" \
+ --data '{
+ "type":"grafana"
+ }'
+```
+
+The above command returns JSON structured in the following way:
+
+```json
+{
+ "id": "CFRPV98RPR1U8",
+ "name": "Grafana :blush:",
+ "team_id": null,
+ "link": "{{API_URL}}/integrations/v1/grafana/mReAoNwDm0eMwKo1mTeTwYo/",
+ "type": "grafana",
+ "default_route": {
+ "id": "RVBE4RKQSCGJ2",
+ "escalation_chain_id": "F5JU6KJET33FE",
+ "slack": {
+ "channel_id": "CH23212D"
+ }
+ },
+ "templates": {
+ "grouping_key": null,
+ "resolve_signal": null,
+ "slack": {
+ "title": null,
+ "message": null,
+ "image_url": null
+ },
+ "web": {
+ "title": null,
+ "message": null,
+ "image_url": null
+ },
+ "email": {
+ "title": null,
+ "message": null
+ },
+ "sms": {
+ "title": null
+ },
+ "phone_call": {
+ "title": null
+ },
+ "telegram": {
+ "title": null,
+ "message": null,
+ "image_url": null
+ }
+ }
+}
+```
+
+Integrations are sources of alerts and alert groups for Grafana OnCall.
+For example, to learn how to integrate Grafana OnCall with Alertmanager, see [Alertmanager]({{< relref "../integrations/add-alertmanager" >}}).
+
+**HTTP request**
+
+`POST {{API_URL}}/api/v1/integrations/`
+
+# Get integration
+
+```shell
+curl "{{API_URL}}/api/v1/integrations/CFRPV98RPR1U8/" \
+ --request GET \
+ --header "Authorization: meowmeowmeow" \
+ --header "Content-Type: application/json"
+```
+
+The above command returns JSON structured in the following way:
+
+```json
+{
+ "id": "CFRPV98RPR1U8",
+ "name": "Grafana :blush:",
+ "team_id": null,
+ "link": "{{API_URL}}/integrations/v1/grafana/mReAoNwDm0eMwKo1mTeTwYo/",
+ "type": "grafana",
+ "default_route": {
+ "id": "RVBE4RKQSCGJ2",
+ "escalation_chain_id": "F5JU6KJET33FE",
+ "slack": {
+ "channel_id": "CH23212D"
+ }
+ },
+ "templates": {
+ "grouping_key": null,
+ "resolve_signal": null,
+ "slack": {
+ "title": null,
+ "message": null,
+ "image_url": null
+ },
+ "web": {
+ "title": null,
+ "message": null,
+ "image_url": null
+ },
+ "email": {
+ "title": null,
+ "message": null
+ },
+ "sms": {
+ "title": null
+ },
+ "phone_call": {
+ "title": null
+ },
+ "telegram": {
+ "title": null,
+ "message": null,
+ "image_url": null
+ }
+ }
+}
+```
+
+This endpoint retrieves an integration. Integrations are sources of alerts and alert groups for Grafana OnCall.
+
+**HTTP request**
+
+`GET {{API_URL}}/api/v1/integrations//`
+
+# List integrations
+
+```shell
+curl "{{API_URL}}/api/v1/integrations/" \
+ --request GET \
+ --header "Authorization: meowmeowmeow" \
+ --header "Content-Type: application/json"
+```
+
+The above command returns JSON structured in the following way:
+
+```json
+{
+ "count": 1,
+ "next": null,
+ "previous": null,
+ "results": [
+ {
+ "id": "CFRPV98RPR1U8",
+ "name": "Grafana :blush:",
+ "team_id": null,
+ "link": "{{API_URL}}/integrations/v1/grafana/mReAoNwDm0eMwKo1mTeTwYo/",
+ "type": "grafana",
+ "default_route": {
+ "id": "RVBE4RKQSCGJ2",
+ "escalation_chain_id": "F5JU6KJET33FE",
+ "slack": {
+ "channel_id": "CH23212D"
+ }
+ },
+ "templates": {
+ "grouping_key": null,
+ "resolve_signal": null,
+ "slack": {
+ "title": null,
+ "message": null,
+ "image_url": null
+ },
+ "web": {
+ "title": null,
+ "message": null,
+ "image_url": null
+ },
+ "email": {
+ "title": null,
+ "message": null
+ },
+ "sms": {
+ "title": null
+ },
+ "phone_call": {
+ "title": null
+ },
+ "telegram": {
+ "title": null,
+ "message": null,
+ "image_url": null
+ }
+ }
+ }
+ ]
+}
+```
+
+**HTTP request**
+
+`GET {{API_URL}}/api/v1/integrations/`
+
+# Update an integration
+
+```shell
+curl "{{API_URL}}/api/v1/integrations/CFRPV98RPR1U8/" \
+ --request PUT \
+ --header "Authorization: meowmeowmeow" \
+ --header "Content-Type: application/json" \
+ --data '{
+ "templates": {
+ "grouping_key": null,
+ "resolve_signal": null,
+ "slack": {
+ "title": null,
+ "message": null,
+ "image_url": null
+ }
+ }
+ }'
+```
+
+The above command returns JSON structured in the following way:
+
+```json
+{
+ "id": "CFRPV98RPR1U8",
+ "name": "Grafana :blush:",
+ "team_id": null,
+ "link": "{{API_URL}}/integrations/v1/grafana/mReAoNwDm0eMwKo1mTeTwYo/",
+ "type": "grafana",
+ "default_route": {
+ "id": "RVBE4RKQSCGJ2",
+ "escalation_chain_id": "F5JU6KJET33FE",
+ "slack": {
+ "channel_id": "CH23212D"
+ }
+ },
+ "templates": {
+ "grouping_key": null,
+ "resolve_signal": null,
+ "slack": {
+ "title": null,
+ "message": null,
+ "image_url": null
+ },
+ "web": {
+ "title": null,
+ "message": null,
+ "image_url": null
+ },
+ "email": {
+ "title": null,
+ "message": null
+ },
+ "sms": {
+ "title": null
+ },
+ "phone_call": {
+ "title": null
+ },
+ "telegram": {
+ "title": null,
+ "message": null,
+ "image_url": null
+ }
+ }
+}
+```
+
+**HTTP request**
+
+`PUT {{API_URL}}/api/v1/integrations/<INTEGRATION_ID>/`
+
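+As an illustrative sketch, templates can also be set to non-null values. This example assumes the Jinja2 template syntax described in the custom templates guide and a `region` field in your alert payload:
+
+```shell
+curl "{{API_URL}}/api/v1/integrations/CFRPV98RPR1U8/" \
+  --request PUT \
+  --header "Authorization: meowmeowmeow" \
+  --header "Content-Type: application/json" \
+  --data '{
+      "templates": {
+          "slack": {
+              "title": "Alert from {{ payload.region }}"
+          }
+      }
+  }'
+```
+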
+# Delete an integration
+
+A deleted integration stops recording new alerts from monitoring systems. Deleting an integration does not remove its related alert groups or alerts.
+
+```shell
+curl "{{API_URL}}/api/v1/integrations/CFRPV98RPR1U8/" \
+ --request DELETE \
+ --header "Authorization: meowmeowmeow"
+```
+
+**HTTP request**
+
+`DELETE {{API_URL}}/api/v1/integrations/<INTEGRATION_ID>/`
diff --git a/docs/sources/oncall-api-reference/on_call_shifts.md b/docs/sources/oncall-api-reference/on_call_shifts.md
new file mode 100644
index 0000000000..fc78a9ed53
--- /dev/null
+++ b/docs/sources/oncall-api-reference/on_call_shifts.md
@@ -0,0 +1,220 @@
++++
+title = "OnCall shifts HTTP API"
+aliases = ["/docs/grafana-cloud/oncall/oncall-api-reference/on_call_shifts/"]
+weight = 600
++++
+
+# Create an OnCall shift
+
+```shell
+curl "{{API_URL}}/api/v1/on_call_shifts/" \
+ --request POST \
+ --header "Authorization: meowmeowmeow" \
+ --header "Content-Type: application/json" \
+ --data '{
+ "name": "Demo single event",
+ "type": "single_event",
+ "team_id": null,
+ "time_zone": null,
+ "level": 0,
+ "start": "2020-09-10T08:00:00",
+ "duration": 10800,
+ "users": [
+ "U4DNY931HHJS5"
+ ]
+ }'
+```
+
+The above command returns JSON structured in the following way:
+
+```json
+{
+ "id": "OH3V5FYQEYJ6M",
+ "name": "Demo single event",
+ "type": "single_event",
+ "team_id": null,
+ "time_zone": null,
+ "level": 0,
+ "start": "2020-09-10T08:00:00",
+ "duration": 10800,
+ "users": [
+ "U4DNY931HHJS5"
+ ]
+}
+```
+
+| Parameter | Unique | Required | Description |
+|-----------|:------:|:--------:|:------------|
+`name` | Yes | Yes | On-call shift name.
+`type` | No | Yes | One of: `single_event`, `recurrent_event`, `rolling_users`.
+`team_id` | No | Optional | ID of the team.
+`time_zone` | No | Optional | On-call shift time zone. Defaults to the schedule time zone. **If set, this field overrides the schedule time zone.** For more information, see [time zones](https://en.wikipedia.org/wiki/List_of_tz_database_time_zones).
+`level` | No | Optional | Priority level. The higher the value, the higher the priority. If two events overlap in one schedule, Grafana OnCall chooses the event with the higher level. For example: Alex is on-call from 8AM till 11AM with level 1, Bob is on-call from 9AM till 11AM with level 2. At 10AM Grafana OnCall will notify Bob. At 8AM OnCall will notify Alex.
+`start` | No | Yes | Start time of the on-call shift, in the datetime format `yyyy-MM-dd'T'HH:mm:ss` (for example, "2020-09-05T08:00:00").
+`duration` | No | Yes | Duration of the event, in seconds.
+`frequency` | No | If type = `recurrent_event` or `rolling_users` | One of: `daily`, `weekly`, `monthly`.
+`interval` | No | Optional | A positive integer representing the interval at which the recurrence rule repeats. For example, with `frequency` = `weekly` and `interval` = `2`, the shift recurs every two weeks.
+`week_start` | No | Optional | Start day of the week in iCal format. One of: `SU` (Sunday), `MO` (Monday), `TU` (Tuesday), `WE` (Wednesday), `TH` (Thursday), `FR` (Friday), `SA` (Saturday). Default: `SU`.
+`by_day` | No | Optional | List of days in iCal format. Valid values are: `SU`, `MO`, `TU`, `WE`, `TH`, `FR`, `SA`.
+`by_month` | No | Optional | List of months. Valid values are `1` to `12`.
+`by_monthday` | No | Optional | List of days of the month. Valid values are `1` to `31` or `-31` to `-1`.
+`users` | No | Optional | List of on-call users.
+`rolling_users` | No | Optional | List of lists of on-call users (for the `rolling_users` event type). Grafana OnCall iterates over these lists for every time frame specified in `frequency`. For example, with `rolling_users` = [[Alex, Bob], [Alice]] and `frequency` = `daily`, Alex and Bob are notified on the first day, Alice on the next day, then Alex and Bob again, and so on. See the example below the table.
+`start_rotation_from_user_index` | No | Optional | Index of the list of users in `rolling_users` from which the on-call rotation starts. By default, the start index is `0`.
+
+Please see [RFC 5545](https://tools.ietf.org/html/rfc5545#section-3.3.10) for more information about recurrence rules.
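+
+For example, the following request sketches a daily `rolling_users` rotation over two user groups; the second and third user IDs here are placeholders:
+
+```shell
+curl "{{API_URL}}/api/v1/on_call_shifts/" \
+  --request POST \
+  --header "Authorization: meowmeowmeow" \
+  --header "Content-Type: application/json" \
+  --data '{
+      "name": "Demo rolling users event",
+      "type": "rolling_users",
+      "start": "2020-09-10T08:00:00",
+      "duration": 10800,
+      "frequency": "daily",
+      "rolling_users": [
+          ["U4DNY931HHJS5", "UAAAAAAAAAAAA"],
+          ["UBBBBBBBBBBBB"]
+      ]
+  }'
+```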
+
+**HTTP request**
+
+`POST {{API_URL}}/api/v1/on_call_shifts/`
+
+# Get an OnCall shift
+
+```shell
+curl "{{API_URL}}/api/v1/on_call_shifts/OH3V5FYQEYJ6M/" \
+ --request GET \
+ --header "Authorization: meowmeowmeow" \
+  --header "Content-Type: application/json"
+```
+
+The above command returns JSON structured in the following way:
+
+```json
+{
+ "id": "OH3V5FYQEYJ6M",
+ "name": "Demo single event",
+ "type": "single_event",
+ "team_id": null,
+ "time_zone": null,
+ "level": 0,
+ "start": "2020-09-10T08:00:00",
+ "duration": 10800,
+ "users": [
+ "U4DNY931HHJS5"
+ ]
+}
+```
+
+**HTTP request**
+
+`GET {{API_URL}}/api/v1/on_call_shifts/<ON_CALL_SHIFT_ID>/`
+
+# List OnCall shifts
+
+```shell
+curl "{{API_URL}}/api/v1/on_call_shifts/" \
+ --request GET \
+ --header "Authorization: meowmeowmeow" \
+ --header "Content-Type: application/json"
+```
+
+The above command returns JSON structured in the following way:
+
+```json
+{
+ "count": 2,
+ "next": null,
+ "previous": null,
+ "results": [
+ {
+ "id": "OH3V5FYQEYJ6M",
+ "name": "Demo single event",
+ "type": "single_event",
+ "team_id": null,
+ "time_zone": null,
+ "level": 0,
+ "start": "2020-09-10T08:00:00",
+ "duration": 10800,
+ "users": [
+ "U4DNY931HHJS5"
+ ]
+ },
+ {
+ "id": "O9WTH7CKM3KZW",
+ "name": "Demo recurrent event",
+ "type": "recurrent_event",
+ "team_id": null,
+ "time_zone": null,
+ "level": 0,
+ "start": "2020-09-10T16:00:00",
+ "duration": 10800,
+ "frequency": "weekly",
+ "interval": 2,
+ "week_start": "SU",
+ "by_day": [
+ "MO",
+ "WE",
+ "FR"
+ ],
+ "by_month": null,
+ "by_monthday": null,
+ "users": [
+ "U4DNY931HHJS5"
+ ]
+ }
+ ]
+}
+```
+
+The following available filter parameters should be provided as `GET` arguments:
+
+* `name` (Exact match)
+* `schedule_id` (Exact match)
+
+**HTTP request**
+
+`GET {{API_URL}}/api/v1/on_call_shifts/`
+
+# Update an OnCall shift
+
+```shell
+curl "{{API_URL}}/api/v1/on_call_shifts/OH3V5FYQEYJ6M/" \
+ --request PUT \
+ --header "Authorization: meowmeowmeow" \
+ --header "Content-Type: application/json" \
+ --data '{
+ "name": "Demo single event",
+ "type": "single_event",
+ "level": 0,
+ "start": "2020-09-10T08:00:00",
+ "duration": 10800,
+ "users": [
+ "U4DNY931HHJS5"
+ ]
+ }'
+```
+
+The above command returns JSON structured in the following way:
+
+```json
+{
+ "id": "OH3V5FYQEYJ6M",
+ "name": "Demo single event",
+ "type": "single_event",
+ "team_id": null,
+ "time_zone": null,
+ "level": 0,
+ "start": "2020-09-10T08:00:00",
+ "duration": 10800,
+ "users": [
+ "U4DNY931HHJS5"
+ ]
+}
+```
+
+**HTTP request**
+
+`PUT {{API_URL}}/api/v1/on_call_shifts/<ON_CALL_SHIFT_ID>/`
+
+# Delete an OnCall shift
+
+```shell
+curl "{{API_URL}}/api/v1/on_call_shifts/OH3V5FYQEYJ6M/" \
+ --request DELETE \
+ --header "Authorization: meowmeowmeow" \
+ --header "Content-Type: application/json"
+```
+
+**HTTP request**
+
+`DELETE {{API_URL}}/api/v1/on_call_shifts/<ON_CALL_SHIFT_ID>/`
\ No newline at end of file
diff --git a/docs/sources/oncall-api-reference/outgoing_webhooks.md b/docs/sources/oncall-api-reference/outgoing_webhooks.md
new file mode 100644
index 0000000000..5e7e399f12
--- /dev/null
+++ b/docs/sources/oncall-api-reference/outgoing_webhooks.md
@@ -0,0 +1,38 @@
++++
+title = "Outgoing webhooks HTTP API"
+aliases = ["/docs/grafana-cloud/oncall/oncall-api-reference/outgoing_webhooks/"]
+weight = 700
++++
+
+# Outgoing webhooks (actions)
+
+Outgoing webhooks (also known as actions) are used in escalation policies with the `trigger_action` step type; see the sketch at the end of this section.
+
+## List actions
+
+```shell
+curl "{{API_URL}}/api/v1/actions/" \
+ --request GET \
+ --header "Authorization: meowmeowmeow" \
+ --header "Content-Type: application/json"
+```
+
+The above command returns JSON structured in the following way:
+
+```json
+{
+ "count": 1,
+ "next": null,
+ "previous": null,
+ "results": [
+ {
+ "id": "KGEFG74LU1D8L",
+ "name": "Publish alert group notification to JIRA"
+ }
+ ]
+}
+```
+
+**HTTP request**
+
+`GET {{API_URL}}/api/v1/actions/`
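+
+As a sketch, an action from this list can be attached to an escalation chain by creating an escalation policy step. This assumes the `trigger_action` step type and the `action_to_trigger` field described in the escalation policies reference:
+
+```shell
+curl "{{API_URL}}/api/v1/escalation_policies/" \
+  --request POST \
+  --header "Authorization: meowmeowmeow" \
+  --header "Content-Type: application/json" \
+  --data '{
+      "escalation_chain_id": "F5JU6KJET33FE",
+      "type": "trigger_action",
+      "action_to_trigger": "KGEFG74LU1D8L"
+  }'
+```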
\ No newline at end of file
diff --git a/docs/sources/oncall-api-reference/personal_notification_rules.md b/docs/sources/oncall-api-reference/personal_notification_rules.md
new file mode 100644
index 0000000000..dca60f79a9
--- /dev/null
+++ b/docs/sources/oncall-api-reference/personal_notification_rules.md
@@ -0,0 +1,142 @@
++++
+title = "Personal Notification Rules HTTP API"
+aliases = ["/docs/grafana-cloud/oncall/oncall-api-reference/personal_notification_rules/"]
+weight = 800
++++
+
+# Post a personal notification rule
+
+```shell
+curl "{{API_URL}}/api/v1/personal_notification_rules/" \
+ --request POST \
+ --header "Authorization: meowmeowmeow" \
+ --header "Content-Type: application/json" \
+ --data '{
+ "user_id": "U4DNY931HHJS5",
+ "type": "notify_by_sms"
+ }'
+```
+
+The above command returns JSON structured in the following way:
+
+```json
+{
+ "id": "NT79GA9I7E4DJ",
+ "user_id": "U4DNY931HHJS5",
+ "position": 0,
+ "important": false,
+ "type": "notify_by_sms"
+}
+```
+
+| Parameter | Required | Description |
+|-----------|:--------:|:------------|
+`user_id` | Yes | User ID
+`position` | Optional | Personal notification rules are executed one after another, starting from `position=0`. A rule with `position=-1` is placed at the end of the list. A new rule created with the position of an existing rule moves the existing rule (and all that follow it) down the list.
+`type` | Yes | One of: `wait`, `notify_by_slack`, `notify_by_sms`, `notify_by_phone_call`, `notify_by_telegram`, `notify_by_email`.
+`duration` | Optional | A wait duration in seconds, used when `wait` is chosen for `type`; see the example below.
+`important` | Optional | Boolean value that indicates whether the rule is "important". Default is `false`.
+
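+For example, the following request sketches a `wait` rule that pauses the notification chain for five minutes (300 seconds) before the next rule runs, mirroring the rule shown in the GET example below:
+
+```shell
+curl "{{API_URL}}/api/v1/personal_notification_rules/" \
+  --request POST \
+  --header "Authorization: meowmeowmeow" \
+  --header "Content-Type: application/json" \
+  --data '{
+      "user_id": "U4DNY931HHJS5",
+      "type": "wait",
+      "duration": 300
+  }'
+```
+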
+**HTTP request**
+
+`POST {{API_URL}}/api/v1/personal_notification_rules/`
+
+# Get a personal notification rule
+
+```shell
+curl "{{API_URL}}/api/v1/personal_notification_rules/ND9EHN5LN1DUU/" \
+ --request GET \
+ --header "Authorization: meowmeowmeow" \
+ --header "Content-Type: application/json"
+```
+
+The above command returns JSON structured in the following way:
+
+```json
+{
+ "id": "ND9EHN5LN1DUU",
+ "user_id": "U4DNY931HHJS5",
+ "position": 1,
+ "duration": 300,
+ "important": false,
+ "type": "wait"
+}
+```
+
+**HTTP request**
+
+`GET {{API_URL}}/api/v1/personal_notification_rules/<PERSONAL_NOTIFICATION_RULE_ID>/`
+
+
+# List personal notification rules
+
+```shell
+curl "{{API_URL}}/api/v1/personal_notification_rules/" \
+ --request GET \
+ --header "Authorization: meowmeowmeow" \
+ --header "Content-Type: application/json"
+```
+
+The above command returns JSON structured in the following way:
+
+```json
+{
+ "count": 4,
+ "next": null,
+ "previous": null,
+ "results": [
+ {
+ "id": "NT79GA9I7E4DJ",
+ "user_id": "U4DNY931HHJS5",
+ "position": 0,
+ "important": false,
+ "type": "notify_by_sms"
+ },
+ {
+ "id": "ND9EHN5LN1DUU",
+ "user_id": "U4DNY931HHJS5",
+ "position": 1,
+ "duration": 300,
+ "important": false,
+ "type": "wait"
+ },
+ {
+ "id": "NEF49YQ1HNPDD",
+ "user_id": "U4DNY931HHJS5",
+ "position": 2,
+ "important": false,
+ "type": "notify_by_phone_call"
+ },
+ {
+ "id": "NWAL6WFJNWDD8",
+ "user_id": "U4DNY931HHJS5",
+ "position": 0,
+ "important": true,
+ "type": "notify_by_phone_call"
+ }
+ ]
+}
+```
+
+The following available filter parameters should be provided as `GET` arguments:
+
+* `user_id`
+* `important`
+
+**HTTP request**
+
+`GET {{API_URL}}/api/v1/personal_notification_rules/`
+
+# Delete a personal notification rule
+
+
+```shell
+curl "{{API_URL}}/api/v1/personal_notification_rules/NWAL6WFJNWDD8/" \
+ --request DELETE \
+ --header "Authorization: meowmeowmeow" \
+ --header "Content-Type: application/json"
+```
+
+**HTTP request**
+
+`DELETE {{API_URL}}/api/v1/personal_notification_rules/<PERSONAL_NOTIFICATION_RULE_ID>/`
\ No newline at end of file
diff --git a/docs/sources/oncall-api-reference/postmortem_messages.md b/docs/sources/oncall-api-reference/postmortem_messages.md
new file mode 100644
index 0000000000..d156fc7161
--- /dev/null
+++ b/docs/sources/oncall-api-reference/postmortem_messages.md
@@ -0,0 +1,141 @@
++++
+title = "Postmortem Messages HTTP API"
+aliases = ["/docs/grafana-cloud/oncall/oncall-api-reference/postmortem_messages/"]
+weight = 900
+draft = true
++++
+
+# Create a postmortem message
+
+```shell
+curl "{{API_URL}}/api/v1/postmortem_messages/" \
+ --request POST \
+ --header "Authorization: meowmeowmeow" \
+ --header "Content-Type: application/json" \
+ --data '{
+ "alert_group_id": "I68T24C13IFW1",
+ "text": "Demo postmortem message"
+ }'
+```
+
+The above command returns JSON structured in the following way:
+
+```json
+{
+ "id": "M4BTQUS3PRHYQ",
+ "alert_group_id": "I68T24C13IFW1",
+ "author": "U4DNY931HHJS5",
+ "source": "web",
+ "created_at": "2020-06-19T12:40:01.429805Z",
+ "text": "Demo postmortem message"
+}
+```
+
+**HTTP request**
+
+`POST {{API_URL}}/api/v1/postmortem_messages/`
+
+# Get a postmortem message
+
+```shell
+curl "{{API_URL}}/api/v1/postmortem_messages/M4BTQUS3PRHYQ/" \
+ --request GET \
+ --header "Authorization: meowmeowmeow" \
+ --header "Content-Type: application/json"
+```
+
+The above command returns JSON structured in the following way:
+
+```json
+{
+ "id": "M4BTQUS3PRHYQ",
+ "alert_group_id": "I68T24C13IFW1",
+ "author": "U4DNY931HHJS5",
+ "source": "web",
+ "created_at": "2020-06-19T12:40:01.429805Z",
+ "text": "Demo postmortem message"
+}
+```
+
+**HTTP request**
+
+`GET {{API_URL}}/api/v1/postmortem_messages/<POSTMORTEM_MESSAGE_ID>/`
+
+# List postmortem messages
+
+```shell
+curl "{{API_URL}}/api/v1/postmortem_messages/" \
+ --request GET \
+ --header "Authorization: meowmeowmeow" \
+ --header "Content-Type: application/json"
+```
+
+The above command returns JSON structured in the following way:
+
+```json
+{
+ "count": 1,
+ "next": null,
+ "previous": null,
+ "results": [
+ {
+ "id": "M4BTQUS3PRHYQ",
+ "alert_group_id": "I68T24C13IFW1",
+ "author": "U4DNY931HHJS5",
+ "source": "web",
+ "created_at": "2020-06-19T12:40:01.429805Z",
+ "text": "Demo postmortem message"
+ }
+ ]
+}
+```
+
+The following available filter parameter should be provided as a `GET` argument:
+
+* `alert_group_id`
+
+
+**HTTP request**
+
+`GET {{API_URL}}/api/v1/postmortem_messages/`
+
+# Update a postmortem message
+
+```shell
+curl "{{API_URL}}/api/v1/postmortem_messages/M4BTQUS3PRHYQ/" \
+ --request PUT \
+ --header "Authorization: meowmeowmeow" \
+ --header "Content-Type: application/json" \
+ --data '{
+ "text": "Demo postmortem message"
+ }'
+```
+
+The above command returns JSON structured in the following way:
+
+```json
+{
+ "id": "M4BTQUS3PRHYQ",
+ "alert_group_id": "I68T24C13IFW1",
+ "author": "U4DNY931HHJS5",
+ "source": "web",
+ "created_at": "2020-06-19T12:40:01.429805Z",
+ "text": "Demo postmortem message"
+}
+```
+
+**HTTP request**
+
+`PUT {{API_URL}}/api/v1/postmortem_messages/<POSTMORTEM_MESSAGE_ID>/`
+
+# Delete a postmortem message
+
+```shell
+curl "{{API_URL}}/api/v1/postmortem_messages/M4BTQUS3PRHYQ/" \
+ --request DELETE \
+ --header "Authorization: meowmeowmeow"
+```
+
+**HTTP request**
+
+`DELETE {{API_URL}}/api/v1/postmortem_messages/<POSTMORTEM_MESSAGE_ID>/`
\ No newline at end of file
diff --git a/docs/sources/oncall-api-reference/postmortems.md b/docs/sources/oncall-api-reference/postmortems.md
new file mode 100644
index 0000000000..df8580f559
--- /dev/null
+++ b/docs/sources/oncall-api-reference/postmortems.md
@@ -0,0 +1,152 @@
++++
+title = "Postmortem HTTP API"
+aliases = ["/docs/grafana-cloud/oncall/oncall-api-reference/postmortems/"]
+weight = 1000
+draft = true
++++
+
+# Create a postmortem
+
+```shell
+curl "{{API_URL}}/api/v1/postmortems/" \
+ --request POST \
+ --header "Authorization: meowmeowmeow" \
+ --header "Content-Type: application/json" \
+ --data '{
+ "alert_group_id": "I68T24C13IFW1",
+ "text": "Demo postmortem text"
+ }'
+```
+
+The above command returns JSON structured in the following way:
+
+```json
+{
+ "id": "P658FE5K87EWZ",
+ "alert_group_id": "I68T24C13IFW1",
+ "created_at": "2020-06-19T12:37:01.430444Z",
+ "text": "Demo postmortem text"
+}
+```
+
+**HTTP request**
+
+`POST {{API_URL}}/api/v1/postmortems/`
+
+# Get a postmortem
+
+```shell
+curl "{{API_URL}}/api/v1/postmortems/P658FE5K87EWZ/" \
+ --request GET \
+ --header "Authorization: meowmeowmeow" \
+ --header "Content-Type: application/json"
+```
+
+The above command returns JSON structured in the following way:
+
+```json
+{
+ "id": "P658FE5K87EWZ",
+ "alert_group_id": "I68T24C13IFW1",
+ "created_at": "2020-06-19T12:37:01.430444Z",
+ "text": "Demo postmortem text",
+ "postmortem_messages": [
+ {
+ "id": "M4BTQUS3PRHYQ",
+ "alert_group_id": "I68T24C13IFW1",
+ "author": "U4DNY931HHJS5",
+ "source": "web",
+ "created_at": "2020-06-19T12:40:01.429805Z",
+ "text": "Demo postmortem message"
+ }
+ ]
+}
+```
+
+**HTTP request**
+
+`GET {{API_URL}}/api/v1/postmortems/<POSTMORTEM_ID>/`
+
+# List postmortems
+
+```shell
+curl "{{API_URL}}/api/v1/postmortems/" \
+ --request GET \
+ --header "Authorization: meowmeowmeow" \
+ --header "Content-Type: application/json"
+```
+
+The above command returns JSON structured in the following way:
+
+```json
+{
+ "count": 1,
+ "next": null,
+ "previous": null,
+ "results": [
+ {
+ "id": "P658FE5K87EWZ",
+ "alert_group_id": "I68T24C13IFW1",
+ "created_at": "2020-06-19T12:37:01.430444Z",
+ "text": "Demo postmortem text",
+ "postmortem_messages": [
+ {
+ "id": "M4BTQUS3PRHYQ",
+ "alert_group_id": "I68T24C13IFW1",
+ "author": "U4DNY931HHJS5",
+ "source": "web",
+ "created_at": "2020-06-19T12:40:01.429805Z",
+ "text": "Demo postmortem message"
+ }
+ ]
+ }
+ ]
+}
+```
+
+The following available filter parameter should be provided as a `GET` argument:
+
+* `alert_group_id`
+
+**HTTP request**
+
+`GET {{API_URL}}/api/v1/postmortems/`
+
+# Update a postmortem
+
+```shell
+curl "{{API_URL}}/api/v1/postmortems/P658FE5K87EWZ/" \
+ --request PUT \
+ --header "Authorization: meowmeowmeow" \
+ --header "Content-Type: application/json" \
+ --data '{
+ "text": "Demo postmortem text"
+ }'
+```
+
+The above command returns JSON structured in the following way:
+
+```json
+{
+ "id": "P658FE5K87EWZ",
+ "alert_group_id": "I68T24C13IFW1",
+ "created_at": "2020-06-19T12:37:01.430444Z",
+ "text": "Demo postmortem text"
+}
+```
+
+**HTTP request**
+
+`PUT {{API_URL}}/api/v1/postmortems/<POSTMORTEM_ID>/`
+
+# Delete a postmortem
+
+```shell
+curl "{{API_URL}}/api/v1/postmortems/P658FE5K87EWZ/" \
+ --request DELETE \
+ --header "Authorization: meowmeowmeow"
+```
+
+**HTTP request**
+
+`DELETE {{API_URL}}/api/v1/postmortems/<POSTMORTEM_ID>/`
\ No newline at end of file
diff --git a/docs/sources/oncall-api-reference/routes.md b/docs/sources/oncall-api-reference/routes.md
new file mode 100644
index 0000000000..e75cb38877
--- /dev/null
+++ b/docs/sources/oncall-api-reference/routes.md
@@ -0,0 +1,188 @@
++++
+title = "Routes HTTP API"
+aliases = ["/docs/grafana-cloud/oncall/oncall-api-reference/routes/"]
+weight = 1100
++++
+
+# Create a route
+
+```shell
+curl "{{API_URL}}/api/v1/routes/" \
+ --request POST \
+ --header "Authorization: meowmeowmeow" \
+ --header "Content-Type: application/json" \
+ --data '{
+ "integration_id": "CFRPV98RPR1U8",
+ "escalation_chain_id": "F5JU6KJET33FE",
+ "routing_regex": "us-(east|west)",
+ "position": 0,
+ "slack": {
+ "channel_id": "CH23212D"
+ }
+ }'
+```
+
+The above command returns JSON structured in the following way:
+
+```json
+{
+ "id": "RIYGUJXCPFHXY",
+ "integration_id": "CFRPV98RPR1U8",
+ "escalation_chain_id": "F5JU6KJET33FE",
+ "routing_regex": "us-(east|west)",
+ "position": 0,
+ "is_the_last_route": false,
+ "slack": {
+ "channel_id": "CH23212D"
+ }
+}
+```
+
+Routes allow you to direct different alerts to different messenger channels and escalation chains. Useful for:
+
+* Important/non-important alerts
+* Alerts for different engineering groups
+* Snoozing spam & debugging alerts
+
+| Parameter | Unique | Required | Description |
+|-----------|:------:|:--------:|:------------|
+`integration_id` | No | Yes | Each route is assigned to a specific integration.
+`escalation_chain_id` | No | Yes | Each route is assigned a specific escalation chain.
+`routing_regex` | Yes | Yes | Python regular expression (use https://regex101.com/ for debugging). OnCall selects this route for an alert when the expression matches anywhere in the alert payload; see the example below.
+`position` | Yes | Optional | Route matching is performed one route after another, starting from `position=0`. A route with `position=-1` is placed at the end of the list, just before `is_the_last_route`. A new route created with the position of an existing route moves the existing route (and all that follow it) down the list.
+`slack` | Yes | Optional | Dictionary with Slack-specific settings for a route.
+
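+As a sketch, a route for the "important alerts" case above could match a severity field in the alert payload; the field name and regex here are illustrative, not part of any fixed payload schema:
+
+```shell
+curl "{{API_URL}}/api/v1/routes/" \
+  --request POST \
+  --header "Authorization: meowmeowmeow" \
+  --header "Content-Type: application/json" \
+  --data '{
+      "integration_id": "CFRPV98RPR1U8",
+      "escalation_chain_id": "F5JU6KJET33FE",
+      "routing_regex": "\"severity\": \"critical\""
+  }'
+```
+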
+**HTTP request**
+
+`POST {{API_URL}}/api/v1/routes/`
+
+# Get a route
+
+```shell
+curl "{{API_URL}}/api/v1/routes/RIYGUJXCPFHXY/" \
+ --request GET \
+ --header "Authorization: meowmeowmeow" \
+  --header "Content-Type: application/json"
+```
+
+The above command returns JSON structured in the following way:
+
+```json
+{
+ "id": "RIYGUJXCPFHXY",
+ "integration_id": "CFRPV98RPR1U8",
+ "escalation_chain_id": "F5JU6KJET33FE",
+ "routing_regex": "us-(east|west)",
+ "position": 0,
+ "is_the_last_route": false,
+ "slack": {
+ "channel_id": "CH23212D"
+ }
+}
+```
+
+**HTTP request**
+
+`GET {{API_URL}}/api/v1/routes/<ROUTE_ID>/`
+
+
+# List routes
+
+```shell
+curl "{{API_URL}}/api/v1/routes/" \
+ --request GET \
+ --header "Authorization: meowmeowmeow" \
+ --header "Content-Type: application/json"
+```
+
+The above command returns JSON structured in the following way:
+
+```json
+{
+ "count": 2,
+ "next": null,
+ "previous": null,
+ "results": [
+ {
+ "id": "RIYGUJXCPFHXY",
+ "integration_id": "CFRPV98RPR1U8",
+ "escalation_chain_id": "F5JU6KJET33FE",
+ "routing_regex": "us-(east|west)",
+ "position": 0,
+ "is_the_last_route": false,
+ "slack": {
+ "channel_id": "CH23212D"
+ }
+ },
+ {
+ "id": "RVBE4RKQSCGJ2",
+ "integration_id": "CFRPV98RPR1U8",
+ "escalation_chain_id": "F5JU6KJET33FE",
+ "routing_regex": ".*",
+ "position": 1,
+ "is_the_last_route": true,
+ "slack": {
+ "channel_id": "CH23212D"
+ }
+ }
+ ]
+}
+```
+
+The following available filter parameters should be provided as `GET` arguments:
+
+* `integration_id`
+* `routing_regex` (Exact match)
+
+**HTTP request**
+
+`GET {{API_URL}}/api/v1/routes/`
+
+# Update a route
+
+```shell
+curl "{{API_URL}}/api/v1/routes/RIYGUJXCPFHXY/" \
+ --request PUT \
+ --header "Authorization: meowmeowmeow" \
+ --header "Content-Type: application/json" \
+ --data '{
+ "routing_regex": "us-(east|west)",
+ "position": 0,
+ "slack": {
+ "channel_id": "CH23212D"
+ }
+ }'
+```
+
+The above command returns JSON structured in the following way:
+
+```json
+{
+ "id": "RIYGUJXCPFHXY",
+ "integration_id": "CFRPV98RPR1U8",
+ "escalation_chain_id": "F5JU6KJET33FE",
+ "routing_regex": "us-(east|west)",
+ "position": 0,
+ "is_the_last_route": false,
+ "slack": {
+ "channel_id": "CH23212D"
+ }
+}
+```
+
+**HTTP request**
+
+`PUT {{API_URL}}/api/v1/routes/<ROUTE_ID>/`
+
+# Delete a route
+
+```shell
+curl "{{API_URL}}/api/v1/routes/RIYGUJXCPFHXY/" \
+ --request DELETE \
+ --header "Authorization: meowmeowmeow" \
+ --header "Content-Type: application/json"
+```
+
+**HTTP request**
+
+`DELETE {{API_URL}}/api/v1/routes/<ROUTE_ID>/`
\ No newline at end of file
diff --git a/docs/sources/oncall-api-reference/schedules.md b/docs/sources/oncall-api-reference/schedules.md
new file mode 100644
index 0000000000..22b41b3b32
--- /dev/null
+++ b/docs/sources/oncall-api-reference/schedules.md
@@ -0,0 +1,206 @@
++++
+title = "Schedule HTTP API"
+aliases = ["/docs/grafana-cloud/oncall/oncall-api-reference/schedules/"]
+weight = 1200
++++
+
+# Create a schedule
+
+```shell
+curl "{{API_URL}}/api/v1/schedules/" \
+ --request POST \
+ --header "Authorization: meowmeowmeow" \
+ --header "Content-Type: application/json" \
+ --data '{
+ "name": "Demo schedule iCal",
+ "ical_url_primary": "https://example.com/meow_calendar.ics",
+ "slack": {
+ "channel_id": "MEOW_SLACK_ID",
+ "user_group_id": "MEOW_SLACK_ID"
+ }
+ }'
+```
+
+The above command returns JSON structured in the following way:
+
+```json
+{
+ "id": "SBM7DV7BKFUYU",
+ "name": "Demo schedule iCal",
+ "type": "ical",
+ "team_id": null,
+ "ical_url_primary": "https://example.com/meow_calendar.ics",
+ "ical_url_overrides": "https://example.com/meow_calendar_overrides.ics",
+ "on_call_now": [
+ "U4DNY931HHJS5"
+ ],
+ "slack": {
+ "channel_id": "MEOW_SLACK_ID",
+ "user_group_id": "MEOW_SLACK_ID"
+ }
+}
+```
+
+| Parameter | Unique | Required | Description |
+|-----------|:------:|:--------:|:------------|
+`name` | Yes | Yes | Schedule name.
+`type` | No | Yes | Schedule type. May be `ical` (used for iCalendar integration) or `calendar` (used for manually created on-call shifts).
+`team_id` | No | Optional | ID of the team.
+`time_zone` | No | Optional | Schedule time zone. Used for manually added on-call shifts in schedules with type `calendar`. The default time zone is `UTC`. For more information, see [time zones](https://en.wikipedia.org/wiki/List_of_tz_database_time_zones).
+`ical_url_primary` | No | If type = `ical` | URL of the external iCal calendar for a schedule with type `ical`.
+`ical_url_overrides` | No | Optional | URL of an external iCal calendar for a schedule of any type. Events from this calendar override events from the primary calendar or from on-call shifts.
+`slack` | No | Optional | Dictionary with Slack-specific settings for a schedule. Includes `channel_id` and `user_group_id` fields, which take a Slack channel ID and a user group ID.
+`shifts` | No | Optional | List of on-call shift IDs. Used for manually added on-call shifts in schedules with type `calendar`; see the example below.
+
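+For example, the following request sketches a `calendar`-type schedule assembled from manually created on-call shifts (the shift IDs are the ones used in the list example below):
+
+```shell
+curl "{{API_URL}}/api/v1/schedules/" \
+  --request POST \
+  --header "Authorization: meowmeowmeow" \
+  --header "Content-Type: application/json" \
+  --data '{
+      "name": "Demo schedule Calendar",
+      "type": "calendar",
+      "time_zone": "America/New_York",
+      "shifts": [
+          "OH3V5FYQEYJ6M",
+          "O9WTH7CKM3KZW"
+      ]
+  }'
+```
+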
+**HTTP request**
+
+`POST {{API_URL}}/api/v1/schedules/`
+
+# Get a schedule
+
+```shell
+curl "{{API_URL}}/api/v1/schedules/SBM7DV7BKFUYU/" \
+ --request GET \
+ --header "Authorization: meowmeowmeow" \
+  --header "Content-Type: application/json"
+```
+
+The above command returns JSON structured in the following way:
+
+```json
+{
+ "id": "SBM7DV7BKFUYU",
+ "name": "Demo schedule iCal",
+ "type": "ical",
+ "team_id": null,
+ "ical_url_primary": "https://example.com/meow_calendar.ics",
+ "ical_url_overrides": "https://example.com/meow_calendar_overrides.ics",
+ "on_call_now": [
+ "U4DNY931HHJS5"
+ ],
+ "slack": {
+ "channel_id": "MEOW_SLACK_ID",
+ "user_group_id": "MEOW_SLACK_ID"
+ }
+}
+```
+
+**HTTP request**
+
+`GET {{API_URL}}/api/v1/schedules/<SCHEDULE_ID>/`
+
+# List schedules
+
+```shell
+curl "{{API_URL}}/api/v1/schedules/" \
+ --request GET \
+ --header "Authorization: meowmeowmeow" \
+ --header "Content-Type: application/json"
+```
+
+The above command returns JSON structured in the following way:
+
+```json
+{
+ "count": 2,
+ "next": null,
+ "previous": null,
+ "results": [
+ {
+ "id": "SBM7DV7BKFUYU",
+ "name": "Demo schedule iCal",
+ "type": "ical",
+ "team_id": null,
+ "ical_url_primary": "https://example.com/meow_calendar.ics",
+ "ical_url_overrides": "https://example.com/meow_calendar_overrides.ics",
+ "on_call_now": [
+ "U4DNY931HHJS5"
+ ],
+ "slack": {
+ "channel_id": "MEOW_SLACK_ID",
+ "user_group_id": "MEOW_SLACK_ID"
+ }
+ },
+ {
+ "id": "S3Z477AHDXTMF",
+ "name": "Demo schedule Calendar",
+ "type": "calendar",
+ "team_id": null,
+ "time_zone": "America/New_York",
+ "on_call_now": [
+ "U4DNY931HHJS5"
+ ],
+ "shifts": [
+ "OH3V5FYQEYJ6M",
+ "O9WTH7CKM3KZW"
+ ],
+ "ical_url_overrides": null,
+ "slack": {
+ "channel_id": "MEOW_SLACK_ID",
+ "user_group_id": "MEOW_SLACK_ID"
+ }
+ }
+ ]
+}
+```
+
+The following available filter parameter should be provided as a `GET` argument:
+
+* `name` (Exact match)
+
+**HTTP request**
+
+`GET {{API_URL}}/api/v1/schedules/`
+
+# Update a schedule
+
+```shell
+curl "{{API_URL}}/api/v1/schedules/SBM7DV7BKFUYU/" \
+ --request PUT \
+ --header "Authorization: meowmeowmeow" \
+ --header "Content-Type: application/json" \
+ --data '{
+ "name": "Demo schedule iCal",
+      "ical_url_primary": "https://example.com/meow_calendar.ics",
+ "slack": {
+ "channel_id": "MEOW_SLACK_ID"
+ }
+ }'
+```
+
+The above command returns JSON structured in the following way:
+
+```json
+{
+ "id": "SBM7DV7BKFUYU",
+ "name": "Demo schedule iCal",
+ "type": "ical",
+ "team_id": null,
+ "ical_url_primary": "https://example.com/meow_calendar.ics",
+ "ical_url_overrides": "https://example.com/meow_calendar_overrides.ics",
+ "on_call_now": [
+ "U4DNY931HHJS5"
+ ],
+ "slack": {
+ "channel_id": "MEOW_SLACK_ID",
+ "user_group_id": "MEOW_SLACK_ID"
+ }
+}
+```
+
+**HTTP request**
+
+`PUT {{API_URL}}/api/v1/schedules/<SCHEDULE_ID>/`
+
+# Delete a schedule
+
+```shell
+curl "{{API_URL}}/api/v1/schedules/SBM7DV7BKFUYU/" \
+ --request DELETE \
+ --header "Authorization: meowmeowmeow" \
+ --header "Content-Type: application/json"
+```
+
+**HTTP request**
+
+`DELETE {{API_URL}}/api/v1/schedules/<SCHEDULE_ID>/`
\ No newline at end of file
diff --git a/docs/sources/oncall-api-reference/slack_channels.md b/docs/sources/oncall-api-reference/slack_channels.md
new file mode 100644
index 0000000000..8426cbfcbe
--- /dev/null
+++ b/docs/sources/oncall-api-reference/slack_channels.md
@@ -0,0 +1,38 @@
++++
+title = "Slack Channels HTTP API"
+aliases = ["/docs/grafana-cloud/oncall/oncall-api-reference/slack_channels/"]
+weight = 1300
++++
+
+# List Slack channels
+
+```shell
+curl "{{API_URL}}/api/v1/slack_channels/" \
+ --request GET \
+ --header "Authorization: meowmeowmeow" \
+ --header "Content-Type: application/json"
+```
+
+The above command returns JSON structured in the following way:
+
+```json
+{
+ "count": 1,
+ "next": null,
+ "previous": null,
+ "results": [
+ {
+ "name": "meow_channel",
+ "slack_id": "MEOW_SLACK_ID"
+ }
+ ]
+}
+```
+
+The following available filter parameter should be provided as a `GET` argument:
+
+* `channel_name`
+
+**HTTP request**
+
+`GET {{API_URL}}/api/v1/slack_channels/`
\ No newline at end of file
diff --git a/docs/sources/oncall-api-reference/user_groups.md b/docs/sources/oncall-api-reference/user_groups.md
new file mode 100644
index 0000000000..a336b1dba5
--- /dev/null
+++ b/docs/sources/oncall-api-reference/user_groups.md
@@ -0,0 +1,46 @@
++++
+title = "OnCall User Groups HTTP API"
+aliases = ["/docs/grafana-cloud/oncall/oncall-api-reference/user_groups/"]
+weight = 1400
++++
+
+
+# List user groups
+
+```shell
+curl "{{API_URL}}/api/v1/user_groups/" \
+ --request GET \
+ --header "Authorization: meowmeowmeow" \
+ --header "Content-Type: application/json"
+```
+
+The above command returns JSON structured in the following way:
+
+```json
+{
+ "count": 1,
+ "next": null,
+ "previous": null,
+ "results": [
+ {
+ "id": "GPFAPH7J7BKJB",
+ "type": "slack_based",
+ "slack": {
+ "id": "MEOW_SLACK_ID",
+ "name": "Meow Group",
+ "handle": "meow_group"
+ }
+ }
+ ]
+}
+```
+
+| Parameter | Unique | Description |
+|-----------|:------:|:------------|
+`id` | Yes | User group ID
+`type` | No | User group type. Currently `slack_based` ([Slack-defined user groups](https://slack.com/intl/en-ru/help/articles/212906697-Create-a-user-group)).
+`slack` | No | Metadata retrieved from Slack.
+
+**HTTP request**
+
+`GET {{API_URL}}/api/v1/user_groups/`
\ No newline at end of file
diff --git a/docs/sources/oncall-api-reference/users.md b/docs/sources/oncall-api-reference/users.md
new file mode 100644
index 0000000000..9dde2cfb5b
--- /dev/null
+++ b/docs/sources/oncall-api-reference/users.md
@@ -0,0 +1,91 @@
++++
+title = "Grafana OnCall Users HTTP API"
+aliases = ["/docs/grafana-cloud/oncall/oncall-api-reference/users/"]
+weight = 1500
++++
+
+# Get a user
+
+This endpoint retrieves the user object.
+
+```shell
+curl "{{API_URL}}/api/v1/users/current/" \
+ --request GET \
+ --header "Authorization: meowmeowmeow" \
+ --header "Content-Type: application/json"
+```
+
+The above command returns JSON structured in the following way:
+
+```json
+{
+ "id": "U4DNY931HHJS5",
+ "email": "public-api-demo-user-1@grafana.com",
+ "slack": [
+ {
+ "user_id": "UALEXSLACKDJPK",
+ "team_id": "TALEXSLACKDJPK"
+ }
+ ],
+ "username": "alex",
+ "role": "admin"
+}
+```
+
+**HTTP request**
+
+`GET {{API_URL}}/api/v1/users/<USER_ID>/`
+
+Use `{{API_URL}}/api/v1/users/current` to retrieve the current user.
+
+| Parameter | Unique | Description |
+|-----------|:------:|:------------|
+`id` | Yes (within the organization) | User ID
+`email` | Yes (within the organization) | User email
+`slack` | Yes (within the organization) | List of user IDs from the connected Slack workspace. Users are linked to Slack by email.
+`username` | Yes (within the organization) | Username
+`role` | No | One of: `user`, `observer`, `admin`.
+
+# List users
+
+```shell
+curl "{{API_URL}}/api/v1/users/" \
+ --request GET \
+ --header "Authorization: meowmeowmeow" \
+ --header "Content-Type: application/json"
+```
+
+The above command returns JSON structured in the following way:
+
+```json
+{
+ "count": 1,
+ "next": null,
+ "previous": null,
+ "results": [
+ {
+ "id": "U4DNY931HHJS5",
+ "email": "public-api-demo-user-1@grafana.com",
+ "slack": [
+ {
+ "user_id": "UALEXSLACKDJPK",
+ "team_id": "TALEXSLACKDJPK"
+ }
+ ],
+ "username": "alex",
+ "role": "admin"
+ }
+ ]
+}
+```
+
+This endpoint retrieves all users.
+
+The following available filter parameter should be provided as a `GET` argument:
+
+* `username` (Exact match)
+
+**HTTP request**
+
+`GET {{API_URL}}/api/v1/users/`
\ No newline at end of file
diff --git a/engine/Dockerfile b/engine/Dockerfile
new file mode 100644
index 0000000000..4a7366207e
--- /dev/null
+++ b/engine/Dockerfile
@@ -0,0 +1,20 @@
+FROM python:3.9-alpine
+RUN apk add bash python3-dev build-base linux-headers pcre-dev mariadb-connector-c-dev openssl-dev libffi-dev git
+RUN pip install uwsgi
+
+WORKDIR /etc/app
+COPY ./requirements.txt ./
+RUN pip install regex==2021.11.2
+RUN pip install -r requirements.txt
+
+COPY ./ ./
+
+RUN DJANGO_SETTINGS_MODULE=settings.prod_without_db \
+    SECRET_KEY="ThEmUsTSecretKEYforBUILDstage123" \
+    TELEGRAM_TOKEN="0000000000:XXXXXXXXXXXXXXXXXXXXXXXXXXXX-XXXXXX" \
+    SLACK_CLIENT_OAUTH_ID=1 python manage.py collectstatic --no-input
+RUN rm db.sqlite3
+
+# This is required for prometheus_client to sync between uwsgi workers
+RUN mkdir -p /tmp/prometheus_django_metrics && \
+    chown -R 1000:2000 /tmp/prometheus_django_metrics
+ENV prometheus_multiproc_dir "/tmp/prometheus_django_metrics"
+
+CMD [ "uwsgi", "--ini", "uwsgi.ini" ]
diff --git a/engine/Dockerfile.all-in-one b/engine/Dockerfile.all-in-one
new file mode 100644
index 0000000000..6b0e5d43a8
--- /dev/null
+++ b/engine/Dockerfile.all-in-one
@@ -0,0 +1,38 @@
+FROM python:3.9-alpine
+
+RUN apk add bash python3-dev build-base linux-headers pcre-dev \
+    mariadb-connector-c-dev openssl-dev libffi-dev git curl redis
+
+RUN pip install uwsgi
+RUN pip install regex==2021.11.2
+
+WORKDIR /etc/app
+COPY ./requirements.txt ./requirements.txt
+RUN pip install -r requirements.txt
+
+COPY ./scripts/start_all_in_one.sh ./start_all_in_one.sh
+
+COPY ./ ./
+RUN rm db.sqlite3 || true
+
+RUN DJANGO_SETTINGS_MODULE=settings.prod_without_db \
+ SECRET_KEY="ThEmUsTSecretKEYforBUILDstage123" \
+ TELEGRAM_TOKEN="0000000000:XXXXXXXXXXXXXXXXXXXXXXXXXXXX-XXXXXX" \
+ SLACK_CLIENT_OAUTH_ID=1 python manage.py collectstatic --no-input
+
+VOLUME /etc/app/sqlite_data
+VOLUME /etc/app/secret_data
+VOLUME /etc/app/redis_data
+
+EXPOSE 8000
+
+CMD ["bash", "./start_all_in_one.sh"]
diff --git a/engine/apps/__init__.py b/engine/apps/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/engine/apps/alerts/__init__.py b/engine/apps/alerts/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/engine/apps/alerts/admin.py b/engine/apps/alerts/admin.py
new file mode 100644
index 0000000000..b78e405182
--- /dev/null
+++ b/engine/apps/alerts/admin.py
@@ -0,0 +1,71 @@
+from django.contrib import admin
+
+from common.admin import CustomModelAdmin
+
+from .models import (
+ Alert,
+ AlertGroup,
+ AlertGroupLogRecord,
+ AlertReceiveChannel,
+ ChannelFilter,
+ CustomButton,
+ EscalationChain,
+ EscalationPolicy,
+ Invitation,
+)
+
+
+@admin.register(Alert)
+class AlertAdmin(CustomModelAdmin):
+ list_display = ("id", "public_primary_key", "group", "title", "created_at")
+ list_filter = ("created_at",)
+
+
+@admin.register(AlertGroup)
+class AlertGroupAdmin(CustomModelAdmin):
+ list_display = ("id", "public_primary_key", "verbose_name", "channel", "channel_filter", "state", "started_at")
+ list_filter = ("started_at",)
+
+ def get_queryset(self, request):
+ return AlertGroup.all_objects
+
+
+@admin.register(AlertGroupLogRecord)
+class AlertGroupLogRecordAdmin(CustomModelAdmin):
+ list_display = ("id", "alert_group", "escalation_policy", "type", "created_at")
+ list_filter = ("created_at", "type")
+
+
+@admin.register(AlertReceiveChannel)
+class AlertReceiveChannelAdmin(CustomModelAdmin):
+ list_display = ("id", "public_primary_key", "integration", "token", "created_at", "deleted_at")
+ list_filter = ("integration",)
+
+ def get_queryset(self, request):
+ return AlertReceiveChannel.objects_with_deleted
+
+
+@admin.register(ChannelFilter)
+class ChannelFilterAdmin(CustomModelAdmin):
+ list_display = ("id", "public_primary_key", "alert_receive_channel", "escalation_chain", "filtering_term", "order")
+
+
+@admin.register(CustomButton)
+class CustomButtonModelAdmin(CustomModelAdmin):
+ list_display = ("id", "public_primary_key", "name", "webhook")
+
+
+@admin.register(EscalationChain)
+class EscalationChainAdmin(CustomModelAdmin):
+ list_display = ("id", "public_primary_key", "organization", "name")
+
+
+@admin.register(EscalationPolicy)
+class EscalationPolicyAdmin(CustomModelAdmin):
+ list_display = ("id", "public_primary_key", "escalation_chain", "step_type_verbal", "order")
+
+
+@admin.register(Invitation)
+class InvitationAdmin(CustomModelAdmin):
+ list_display = ("id", "alert_group", "author", "invitee", "is_active", "created_at")
+ list_filter = ("is_active", "created_at")
diff --git a/engine/apps/alerts/constants.py b/engine/apps/alerts/constants.py
new file mode 100644
index 0000000000..6d5dd0b87c
--- /dev/null
+++ b/engine/apps/alerts/constants.py
@@ -0,0 +1,12 @@
+class ActionSource:
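+    """Enumerates where an action came from: SLACK=0, WEB=1, TWILIO=2, TELEGRAM=3 (unpacked from range(4))."""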
+ (
+ SLACK,
+ WEB,
+ TWILIO,
+ TELEGRAM,
+ ) = range(4)
+
+
+TASK_DELAY_SECONDS = 1
+
+NEXT_ESCALATION_DELAY = 5
diff --git a/engine/apps/alerts/escalation_snapshot/__init__.py b/engine/apps/alerts/escalation_snapshot/__init__.py
new file mode 100644
index 0000000000..cea65baa17
--- /dev/null
+++ b/engine/apps/alerts/escalation_snapshot/__init__.py
@@ -0,0 +1 @@
+from .escalation_snapshot_mixin import EscalationSnapshotMixin # noqa: F401
diff --git a/engine/apps/alerts/escalation_snapshot/escalation_snapshot_mixin.py b/engine/apps/alerts/escalation_snapshot/escalation_snapshot_mixin.py
new file mode 100644
index 0000000000..0ec5e67ca1
--- /dev/null
+++ b/engine/apps/alerts/escalation_snapshot/escalation_snapshot_mixin.py
@@ -0,0 +1,272 @@
+import logging
+from typing import Optional
+
+import pytz
+from celery import uuid as celery_uuid
+from dateutil.parser import parse
+from django.apps import apps
+from django.utils import timezone
+from django.utils.functional import cached_property
+
+from apps.alerts.constants import NEXT_ESCALATION_DELAY
+from apps.alerts.escalation_snapshot.snapshot_classes import (
+ ChannelFilterSnapshot,
+ EscalationChainSnapshot,
+ EscalationPolicySnapshot,
+ EscalationSnapshot,
+)
+from apps.alerts.escalation_snapshot.utils import eta_for_escalation_step_notify_if_time
+from apps.alerts.tasks import calculate_escalation_finish_time, escalate_alert_group
+from apps.slack.scenarios.scenario_step import ScenarioStep
+
+logger = logging.getLogger(__name__)
+
+
+class EscalationSnapshotMixin:
+ """
+    Mixin for AlertGroup. It contains methods related to alert group escalation.
+ """
+
+ def build_raw_escalation_snapshot(self) -> dict:
+ """
+        Builds a new escalation snapshot in a JSON-serializable format (dict).
+        Use this method to prepare escalation chain data for saving to the alert group before starting a new escalation.
+
+ Example result:
+ {
+ 'channel_filter_snapshot': {
+ 'id': 1,
+ 'notify_in_slack': True,
+ 'str_for_clients': 'default',
+ 'notify_in_telegram': True
+ },
+ 'escalation_chain_snapshot': {
+ 'id': 1,
+ 'name': 'Test'
+ },
+ 'escalation_policies_snapshots': [
+ {
+ 'id': 1,
+ 'step': 14,
+ 'order': 0,
+ 'to_time': None,
+ 'from_time': None,
+ 'num_alerts_in_window': None,
+ 'num_minutes_in_window': None,
+ 'wait_delay': None,
+ 'notify_schedule': None,
+ 'notify_to_group': None,
+ 'passed_last_time': None,
+ 'escalation_counter': 0,
+ 'last_notified_user': None,
+ 'custom_button_trigger': None,
+ 'notify_to_users_queue': [1,2,3]
+ },
+ {
+ 'id': 2,
+ 'step': 0,
+ 'order': 1,
+ 'to_time': None,
+ 'from_time': None,
+ 'num_alerts_in_window': None,
+ 'num_minutes_in_window': None,
+ 'wait_delay': '00:05:00',
+ 'notify_schedule': None,
+ 'notify_to_group': None,
+ 'passed_last_time': None,
+ 'escalation_counter': 0,
+ 'last_notified_user': None,
+ 'custom_button_trigger': None,
+ 'notify_to_users_queue': []
+ },
+ ],
+ 'slack_channel_id': 'SLACK_CHANNEL_ID',
+ 'last_active_escalation_policy_order': None,
+ 'pause_escalation': False,
+            'next_step_eta': '2021-10-18T10:28:28.890369Z'
+ }
+ """
+
+ escalation_snapshot = None
+
+ if self.escalation_chain_exists:
+ channel_filter = self.channel_filter
+ escalation_chain = channel_filter.escalation_chain
+ escalation_policies = escalation_chain.escalation_policies.all()
+
+ data = {
+ "channel_filter_snapshot": channel_filter,
+ "escalation_chain_snapshot": escalation_chain,
+ "escalation_policies_snapshots": escalation_policies,
+ "slack_channel_id": self.slack_channel_id,
+ }
+ escalation_snapshot = EscalationSnapshot.serializer(data).data
+ return escalation_snapshot
+
+ def calculate_eta_for_finish_escalation(self, escalation_started=False, start_time=None):
+ if not self.escalation_snapshot:
+ return
+ EscalationPolicy = apps.get_model("alerts", "EscalationPolicy")
+ TOLERANCE_SECONDS = 1
+ TOLERANCE_TIME = timezone.timedelta(seconds=NEXT_ESCALATION_DELAY + TOLERANCE_SECONDS)
+ start_time = start_time or timezone.now() # start time may be different for silenced incidents
+ wait_summ = timezone.timedelta()
+        # next_active_escalation_policy_order can change while the escalation task runs in parallel with this
+        # calculation, so the `escalation_started` flag decides the starting point: if escalation has already
+        # started, continue from the current order; otherwise count the ETA from the first step.
+ next_escalation_policy_order = (
+ self.escalation_snapshot.next_active_escalation_policy_order if escalation_started else 0
+ )
+ escalation_policies = self.escalation_snapshot.escalation_policies_snapshots[next_escalation_policy_order:]
+ for escalation_policy in escalation_policies:
+ if escalation_policy.step == EscalationPolicy.STEP_WAIT:
+ if escalation_policy.wait_delay is not None:
+ wait_summ += escalation_policy.wait_delay
+ else:
+ wait_summ += EscalationPolicy.DEFAULT_WAIT_DELAY # Default wait in case it's not selected yet
+ elif escalation_policy.step == EscalationPolicy.STEP_NOTIFY_IF_TIME:
+ if escalation_policy.from_time and escalation_policy.to_time:
+ estimate_start_time = start_time + wait_summ
+ STEP_TOLERANCE = timezone.timedelta(minutes=1)
+ next_step_estimate_start_time = eta_for_escalation_step_notify_if_time(
+ escalation_policy.from_time,
+ escalation_policy.to_time,
+ estimate_start_time + STEP_TOLERANCE,
+ )
+ wait_summ += next_step_estimate_start_time - estimate_start_time
+ elif escalation_policy.step == EscalationPolicy.STEP_REPEAT_ESCALATION_N_TIMES:
+                # the part of the escalation up to the repeat step is passed six times: the first pass plus five repeats
+ wait_summ *= EscalationPolicy.MAX_TIMES_REPEAT + 1
+ elif escalation_policy.step == EscalationPolicy.STEP_NOTIFY_IF_NUM_ALERTS_IN_TIME_WINDOW:
+ # In this case we cannot calculate finish time, so we return None
+ return
+ elif escalation_policy.step == EscalationPolicy.STEP_FINAL_RESOLVE:
+ break
+ wait_summ += TOLERANCE_TIME
+
+ escalation_finish_time = start_time + wait_summ
+ return escalation_finish_time
+
+ @property
+ def channel_filter_with_respect_to_escalation_snapshot(self):
+        # Prefer the channel filter data saved in the escalation snapshot, because the channel filter object
+        # can be changed or deleted during escalation
+ return self.channel_filter_snapshot or self.channel_filter
+
+ @property
+ def escalation_chain_with_respect_to_escalation_snapshot(self):
+        # Prefer the escalation chain data saved in the escalation snapshot, because the escalation chain object
+        # can be changed or deleted during escalation
+ return self.escalation_chain_snapshot or (self.channel_filter.escalation_chain if self.channel_filter else None)
+
+ @cached_property
+ def channel_filter_snapshot(self) -> Optional[ChannelFilterSnapshot]:
+ # in some cases we need only channel filter and don't want to serialize whole escalation
+ channel_filter_snapshot_object = None
+ escalation_snapshot = self.raw_escalation_snapshot
+ if escalation_snapshot is not None:
+ channel_filter_snapshot = ChannelFilterSnapshot.serializer().to_internal_value(
+ escalation_snapshot["channel_filter_snapshot"]
+ )
+ channel_filter_snapshot_object = ChannelFilterSnapshot(**channel_filter_snapshot)
+ return channel_filter_snapshot_object
+
+ @cached_property
+ def escalation_chain_snapshot(self) -> Optional[EscalationChainSnapshot]:
+ # in some cases we need only escalation chain and don't want to serialize whole escalation
+ escalation_chain_snapshot_object = None
+ escalation_snapshot = self.raw_escalation_snapshot
+ if escalation_snapshot is not None:
+ escalation_chain_snapshot = EscalationChainSnapshot.serializer().to_internal_value(
+ escalation_snapshot["escalation_chain_snapshot"]
+ )
+ escalation_chain_snapshot_object = EscalationChainSnapshot(**escalation_chain_snapshot)
+ return escalation_chain_snapshot_object
+
+ @cached_property
+ def escalation_snapshot(self) -> Optional[EscalationSnapshot]:
+ escalation_snapshot_object = None
+ raw_escalation_snapshot = self.raw_escalation_snapshot
+ if raw_escalation_snapshot is not None:
+ escalation_snapshot_object = self._deserialize_escalation_snapshot(raw_escalation_snapshot)
+ return escalation_snapshot_object
+
+ def _deserialize_escalation_snapshot(self, raw_escalation_snapshot) -> EscalationSnapshot:
+ """
+ Deserializes raw escalation snapshot to EscalationSnapshot object with channel_filter_snapshot as
+ ChannelFilterSnapshot object and escalation_policies_snapshots as EscalationPolicySnapshot objects
+ :param raw_escalation_snapshot: dict
+ :return: EscalationSnapshot
+ """
+ deserialized_escalation_snapshot = EscalationSnapshot.serializer().to_internal_value(raw_escalation_snapshot)
+ channel_filter_snapshot = deserialized_escalation_snapshot["channel_filter_snapshot"]
+ deserialized_escalation_snapshot["channel_filter_snapshot"] = ChannelFilterSnapshot(**channel_filter_snapshot)
+
+ escalation_chain_snapshot = deserialized_escalation_snapshot["escalation_chain_snapshot"]
+ deserialized_escalation_snapshot["escalation_chain_snapshot"] = EscalationChainSnapshot(
+ **escalation_chain_snapshot
+ )
+
+ escalation_policies_snapshots_raw = deserialized_escalation_snapshot["escalation_policies_snapshots"]
+ escalation_policies_snapshots = []
+ for escalation_policy_snapshot in escalation_policies_snapshots_raw:
+ escalation_policies_snapshots.append(EscalationPolicySnapshot(**escalation_policy_snapshot))
+ deserialized_escalation_snapshot["escalation_policies_snapshots"] = escalation_policies_snapshots
+
+ escalation_snapshot_object = EscalationSnapshot(self, **deserialized_escalation_snapshot)
+ return escalation_snapshot_object
+
+ @property
+ def escalation_chain_exists(self):
+ return not self.pause_escalation and self.channel_filter and self.channel_filter.escalation_chain
+
+ @property
+ def pause_escalation(self):
+ # get pause_escalation field directly to avoid serialization overhead
+ return self.raw_escalation_snapshot is not None and self.raw_escalation_snapshot.get("pause_escalation", False)
+
+ @property
+ def next_step_eta(self):
+ # get next_step_eta field directly to avoid serialization overhead
+ raw_next_step_eta = (
+ self.raw_escalation_snapshot.get("next_step_eta") if self.raw_escalation_snapshot is not None else None
+ )
+ if raw_next_step_eta:
+ return parse(raw_next_step_eta).replace(tzinfo=pytz.UTC)
+
+ def start_escalation_if_needed(self, countdown=ScenarioStep.CROSS_ACTION_DELAY, eta=None):
+ """
+ :type self:AlertGroup
+ """
+ AlertGroup = apps.get_model("alerts", "AlertGroup")
+
+ if self.pause_escalation:
+ return
+
+ if not self.escalation_chain_exists:
+ return
+
+ logger.debug(f"Start escalation for alert group with pk: {self.pk}")
+
+ # take raw escalation snapshot from db if escalation is paused
+ raw_escalation_snapshot = (
+ self.build_raw_escalation_snapshot() if not self.pause_escalation else self.raw_escalation_snapshot
+ )
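+        # Create the task id up front: it is stored as active_escalation_id on the alert group and passed to
+        # escalate_alert_group, tying the scheduled task to this escalation run.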
+ task_id = celery_uuid()
+
+ AlertGroup.all_objects.filter(pk=self.pk,).update(
+ active_escalation_id=task_id,
+ is_escalation_finished=False,
+ raw_escalation_snapshot=raw_escalation_snapshot,
+ )
+ if not self.pause_escalation:
+ calculate_escalation_finish_time.apply_async((self.pk,), immutable=True)
+ escalate_alert_group.apply_async((self.pk,), countdown=countdown, immutable=True, eta=eta, task_id=task_id)
+
+ def stop_escalation(self):
+ self.is_escalation_finished = True
+ self.estimate_escalation_finish_time = None
+ # change active_escalation_id to prevent alert escalation
+ self.active_escalation_id = "intentionally_stopped"
+ self.save(update_fields=["is_escalation_finished", "estimate_escalation_finish_time", "active_escalation_id"])
diff --git a/engine/apps/alerts/escalation_snapshot/serializers/__init__.py b/engine/apps/alerts/escalation_snapshot/serializers/__init__.py
new file mode 100644
index 0000000000..baaf88e029
--- /dev/null
+++ b/engine/apps/alerts/escalation_snapshot/serializers/__init__.py
@@ -0,0 +1,4 @@
+from .channel_filter_snapshot import ChannelFilterSnapshotSerializer # noqa: F401
+from .escalation_chain_snapshot import EscalationChainSnapshotSerializer # noqa: F401
+from .escalation_policy_snapshot import EscalationPolicySnapshotSerializer # noqa: F401
+from .escalation_snapshot import EscalationSnapshotSerializer # noqa: F401
diff --git a/engine/apps/alerts/escalation_snapshot/serializers/channel_filter_snapshot.py b/engine/apps/alerts/escalation_snapshot/serializers/channel_filter_snapshot.py
new file mode 100644
index 0000000000..e45c58fe6c
--- /dev/null
+++ b/engine/apps/alerts/escalation_snapshot/serializers/channel_filter_snapshot.py
@@ -0,0 +1,23 @@
+from rest_framework import serializers
+
+from apps.alerts.models.channel_filter import ChannelFilter
+
+
+class ChannelFilterSnapshotSerializer(serializers.ModelSerializer):
+ id = serializers.IntegerField()
+
+ class Meta:
+ model = ChannelFilter
+ fields = [
+ "id",
+ "str_for_clients",
+ "notify_in_slack",
+ "notify_in_telegram",
+ "notification_backends",
+ ]
+
+ def to_internal_value(self, data):
+ result = super().to_internal_value(data)
+ result["str_for_clients"] = data.get("str_for_clients")
+ result["notification_backends"] = data.get("notification_backends")
+ return result
diff --git a/engine/apps/alerts/escalation_snapshot/serializers/escalation_chain_snapshot.py b/engine/apps/alerts/escalation_snapshot/serializers/escalation_chain_snapshot.py
new file mode 100644
index 0000000000..a63aea34a1
--- /dev/null
+++ b/engine/apps/alerts/escalation_snapshot/serializers/escalation_chain_snapshot.py
@@ -0,0 +1,14 @@
+from rest_framework import serializers
+
+from apps.alerts.models.escalation_chain import EscalationChain
+
+
+class EscalationChainSnapshotSerializer(serializers.ModelSerializer):
+ id = serializers.IntegerField()
+
+ class Meta:
+ model = EscalationChain
+ fields = [
+ "id",
+ "name",
+ ]
diff --git a/engine/apps/alerts/escalation_snapshot/serializers/escalation_policy_snapshot.py b/engine/apps/alerts/escalation_snapshot/serializers/escalation_policy_snapshot.py
new file mode 100644
index 0000000000..82908ff421
--- /dev/null
+++ b/engine/apps/alerts/escalation_snapshot/serializers/escalation_policy_snapshot.py
@@ -0,0 +1,85 @@
+from rest_framework import serializers
+
+from apps.alerts.models.custom_button import CustomButton
+from apps.alerts.models.escalation_policy import EscalationPolicy
+from apps.schedules.models import OnCallSchedule
+from apps.user_management.models import User
+
+
+class PrimaryKeyRelatedFieldWithNoneValue(serializers.PrimaryKeyRelatedField):
+ """
+ Returns None instead of ValidationError if related object does not exist
+ """
+
+ def to_internal_value(self, data):
+ if self.pk_field is not None:
+ data = self.pk_field.to_internal_value(data)
+ try:
+ return self.get_queryset().filter(pk=data).first()
+ except (TypeError, ValueError):
+ self.fail("incorrect_type", data_type=type(data).__name__)
+
+
+class ManyRelatedFieldWithNoneCleanup(serializers.ManyRelatedField):
+ """
+ Removes None values from ManyRelatedFields.
+ Expected to be used with PrimaryKeyRelatedFieldWithNoneValue.
+
+ Example:
+ # We have input data with non-existent primary key
+
+ PrimaryKeyRelatedField(many=True, queryset=...) # raise ValidationError
+ PrimaryKeyRelatedFieldWithNoneValue(many=True, queryset=...) # will return [None] for non-existent id
+ ManyRelatedFieldWithNoneCleanup(child_relation=PrimaryKeyRelatedField(queryset=...)) # raise ValidationError
+ ManyRelatedFieldWithNoneCleanup(child_relation=PrimaryKeyRelatedFieldWithNoneValue(queryset=...)) # just return []
+ """
+
+ def to_internal_value(self, data):
+ if isinstance(data, str) or not hasattr(data, "__iter__"):
+ self.fail("not_a_list", input_type=type(data).__name__)
+ if not self.allow_empty and len(data) == 0:
+ self.fail("empty")
+
+ internal_value = []
+ for item in data:
+ child_internal_value = self.child_relation.to_internal_value(item)
+ if child_internal_value is not None:
+ internal_value.append(child_internal_value)
+ return internal_value
+
+
+class EscalationPolicySnapshotSerializer(serializers.ModelSerializer):
+ id = serializers.IntegerField()
+ order = serializers.IntegerField()
+ wait_delay = serializers.DurationField(allow_null=True)
+ notify_to_users_queue = ManyRelatedFieldWithNoneCleanup(
+ child_relation=PrimaryKeyRelatedFieldWithNoneValue(allow_null=True, queryset=User.objects)
+ )
+ escalation_counter = serializers.IntegerField(default=0)
+ passed_last_time = serializers.DateTimeField(allow_null=True, default=None)
+ custom_button_trigger = PrimaryKeyRelatedFieldWithNoneValue(allow_null=True, queryset=CustomButton.objects)
+ notify_schedule = PrimaryKeyRelatedFieldWithNoneValue(allow_null=True, queryset=OnCallSchedule.objects)
+ num_alerts_in_window = serializers.IntegerField(allow_null=True, default=None)
+ num_minutes_in_window = serializers.IntegerField(allow_null=True, default=None)
+ pause_escalation = serializers.BooleanField(default=False)
+
+ class Meta:
+ model = EscalationPolicy
+ fields = [
+ "id",
+ "order",
+ "step",
+ "wait_delay",
+ "notify_to_users_queue",
+ "last_notified_user",
+ "from_time",
+ "to_time",
+ "num_alerts_in_window",
+ "num_minutes_in_window",
+ "custom_button_trigger",
+ "notify_schedule",
+ "notify_to_group",
+ "escalation_counter",
+ "passed_last_time",
+ "pause_escalation",
+ ]
diff --git a/engine/apps/alerts/escalation_snapshot/serializers/escalation_snapshot.py b/engine/apps/alerts/escalation_snapshot/serializers/escalation_snapshot.py
new file mode 100644
index 0000000000..fbe15be0b7
--- /dev/null
+++ b/engine/apps/alerts/escalation_snapshot/serializers/escalation_snapshot.py
@@ -0,0 +1,28 @@
+from rest_framework import serializers
+
+from apps.alerts.escalation_snapshot.serializers import (
+ ChannelFilterSnapshotSerializer,
+ EscalationChainSnapshotSerializer,
+ EscalationPolicySnapshotSerializer,
+)
+
+
+class EscalationSnapshotSerializer(serializers.Serializer):
+ channel_filter_snapshot = ChannelFilterSnapshotSerializer()
+ escalation_chain_snapshot = EscalationChainSnapshotSerializer()
+ last_active_escalation_policy_order = serializers.IntegerField(allow_null=True, default=None)
+ escalation_policies_snapshots = EscalationPolicySnapshotSerializer(many=True)
+ slack_channel_id = serializers.CharField(allow_null=True)
+ pause_escalation = serializers.BooleanField(allow_null=True, default=False)
+ next_step_eta = serializers.DateTimeField(allow_null=True, default=None)
+
+ class Meta:
+ fields = [
+ "channel_filter_snapshot",
+ "escalation_chain_snapshot",
+ "last_active_escalation_policy_order",
+ "escalation_policies_snapshots",
+ "slack_channel_id",
+ "pause_escalation",
+ "next_step_eta",
+ ]
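
An illustrative example (all values made up) of the dict this serializer produces, i.e. what ends up stored on the alert group as raw_escalation_snapshot:

snapshot_dict = {
    "channel_filter_snapshot": {
        "id": 1,
        "str_for_clients": "default",
        "notify_in_slack": True,
        "notify_in_telegram": False,
        "notification_backends": None,
    },
    "escalation_chain_snapshot": {"id": 2, "name": "critical"},
    "last_active_escalation_policy_order": None,  # escalation has not started yet
    "escalation_policies_snapshots": [],  # list of serialized escalation policy steps
    "slack_channel_id": "C0123456789",
    "pause_escalation": False,
    "next_step_eta": None,
}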
diff --git a/engine/apps/alerts/escalation_snapshot/snapshot_classes/__init__.py b/engine/apps/alerts/escalation_snapshot/snapshot_classes/__init__.py
new file mode 100644
index 0000000000..0e72a40eb7
--- /dev/null
+++ b/engine/apps/alerts/escalation_snapshot/snapshot_classes/__init__.py
@@ -0,0 +1,4 @@
+from .channel_filter_snapshot import ChannelFilterSnapshot # noqa: F401
+from .escalation_chain_snapshot import EscalationChainSnapshot # noqa: F401
+from .escalation_policy_snapshot import EscalationPolicySnapshot # noqa: F401
+from .escalation_snapshot import EscalationSnapshot # noqa: F401
diff --git a/engine/apps/alerts/escalation_snapshot/snapshot_classes/channel_filter_snapshot.py b/engine/apps/alerts/escalation_snapshot/snapshot_classes/channel_filter_snapshot.py
new file mode 100644
index 0000000000..7438a8d19f
--- /dev/null
+++ b/engine/apps/alerts/escalation_snapshot/snapshot_classes/channel_filter_snapshot.py
@@ -0,0 +1,15 @@
+from apps.alerts.escalation_snapshot.serializers import ChannelFilterSnapshotSerializer
+
+
+class ChannelFilterSnapshot:
+
+ __slots__ = ("id", "str_for_clients", "notify_in_slack", "notify_in_telegram", "notification_backends")
+
+ serializer = ChannelFilterSnapshotSerializer
+
+ def __init__(self, id, str_for_clients, notify_in_slack, notify_in_telegram, notification_backends):
+ self.id = id
+ self.str_for_clients = str_for_clients
+ self.notify_in_slack = notify_in_slack
+ self.notify_in_telegram = notify_in_telegram
+ self.notification_backends = notification_backends
diff --git a/engine/apps/alerts/escalation_snapshot/snapshot_classes/escalation_chain_snapshot.py b/engine/apps/alerts/escalation_snapshot/snapshot_classes/escalation_chain_snapshot.py
new file mode 100644
index 0000000000..0b7e574ba8
--- /dev/null
+++ b/engine/apps/alerts/escalation_snapshot/snapshot_classes/escalation_chain_snapshot.py
@@ -0,0 +1,12 @@
+from apps.alerts.escalation_snapshot.serializers import EscalationChainSnapshotSerializer
+
+
+class EscalationChainSnapshot:
+
+ __slots__ = ("id", "name")
+
+ serializer = EscalationChainSnapshotSerializer
+
+ def __init__(self, id, name):
+ self.id = id
+ self.name = name
diff --git a/engine/apps/alerts/escalation_snapshot/snapshot_classes/escalation_policy_snapshot.py b/engine/apps/alerts/escalation_snapshot/snapshot_classes/escalation_policy_snapshot.py
new file mode 100644
index 0000000000..2ee420e7c0
--- /dev/null
+++ b/engine/apps/alerts/escalation_snapshot/snapshot_classes/escalation_policy_snapshot.py
@@ -0,0 +1,485 @@
+from collections import namedtuple
+from typing import List, Optional
+
+from django.db import transaction
+from django.utils import timezone
+
+from apps.alerts.constants import NEXT_ESCALATION_DELAY
+from apps.alerts.escalation_snapshot.utils import eta_for_escalation_step_notify_if_time
+from apps.alerts.models.alert_group_log_record import AlertGroupLogRecord
+from apps.alerts.models.escalation_policy import EscalationPolicy
+from apps.alerts.tasks import (
+ custom_button_result,
+ notify_all_task,
+ notify_group_task,
+ notify_user_task,
+ resolve_by_last_step_task,
+)
+from apps.schedules.ical_utils import list_users_to_notify_from_ical
+from apps.user_management.models import User
+
+
+class EscalationPolicySnapshot:
+ __slots__ = (
+ "id",
+ "order",
+ "step",
+ "wait_delay",
+ "notify_to_users_queue",
+ "last_notified_user",
+ "from_time",
+ "to_time",
+ "num_alerts_in_window",
+ "num_minutes_in_window",
+ "custom_button_trigger",
+ "notify_schedule",
+ "notify_to_group",
+ "escalation_counter",
+ "passed_last_time",
+ "pause_escalation",
+ )
+
+ StepExecutionResultData = namedtuple(
+ "StepExecutionResultData",
+ ["eta", "stop_escalation", "start_from_beginning", "pause_escalation"],
+ )
+
+ def __init__(
+ self,
+ id,
+ order,
+ step,
+ wait_delay,
+ notify_to_users_queue,
+ last_notified_user,
+ from_time,
+ to_time,
+ num_alerts_in_window,
+ num_minutes_in_window,
+ custom_button_trigger,
+ notify_schedule,
+ notify_to_group,
+ escalation_counter,
+ passed_last_time,
+ pause_escalation,
+ ):
+ self.id = id
+ self.order = order
+ self.step = step
+ self.wait_delay = wait_delay
+ self.notify_to_users_queue = notify_to_users_queue
+ self.last_notified_user = last_notified_user
+ self.from_time = from_time
+ self.to_time = to_time
+ self.num_alerts_in_window = num_alerts_in_window
+ self.num_minutes_in_window = num_minutes_in_window
+ self.custom_button_trigger = custom_button_trigger
+ self.notify_schedule = notify_schedule
+ self.notify_to_group = notify_to_group
+ self.escalation_counter = escalation_counter # used for STEP_REPEAT_ESCALATION_N_TIMES
+ self.passed_last_time = passed_last_time # used for building escalation plan
+ self.pause_escalation = pause_escalation # used for STEP_NOTIFY_IF_NUM_ALERTS_IN_TIME_WINDOW
+
+ def __str__(self) -> str:
+ return f"Escalation link, order: {self.order}, step: '{self.step_display}'"
+
+ @property
+ def step_display(self) -> str:
+ return EscalationPolicy.STEP_CHOICES[self.step][1]
+
+ @property
+ def escalation_policy(self) -> Optional[EscalationPolicy]:
+ return EscalationPolicy.objects.filter(pk=self.id).first()
+
+ @property
+ def sorted_users_queue(self) -> List[User]:
+ return sorted(self.notify_to_users_queue, key=lambda user: (user.username or "", user.pk))
+
+ @property
+ def next_user_in_sorted_queue(self) -> User:
+ users_queue = self.sorted_users_queue
+ try:
+ last_user_index = users_queue.index(self.last_notified_user)
+ except ValueError:
+ last_user_index = -1
+ next_user = users_queue[(last_user_index + 1) % len(users_queue)]
+ return next_user
+
+ def execute(self, alert_group, reason) -> StepExecutionResultData:
+ action_map = {
+ EscalationPolicy.STEP_WAIT: self._escalation_step_wait,
+ EscalationPolicy.STEP_FINAL_NOTIFYALL: self._escalation_step_notify_all,
+ EscalationPolicy.STEP_REPEAT_ESCALATION_N_TIMES: self._escalation_step_repeat_escalation_n_times,
+ EscalationPolicy.STEP_FINAL_RESOLVE: self._escalation_step_resolve,
+ EscalationPolicy.STEP_NOTIFY_GROUP: self._escalation_step_notify_user_group,
+ EscalationPolicy.STEP_NOTIFY_GROUP_IMPORTANT: self._escalation_step_notify_user_group,
+ EscalationPolicy.STEP_NOTIFY_SCHEDULE: self._escalation_step_notify_on_call_schedule,
+ EscalationPolicy.STEP_NOTIFY_SCHEDULE_IMPORTANT: self._escalation_step_notify_on_call_schedule,
+ EscalationPolicy.STEP_TRIGGER_CUSTOM_BUTTON: self._escalation_step_trigger_custom_button,
+ EscalationPolicy.STEP_NOTIFY_USERS_QUEUE: self._escalation_step_notify_users_queue,
+ EscalationPolicy.STEP_NOTIFY_IF_TIME: self._escalation_step_notify_if_time,
+ EscalationPolicy.STEP_NOTIFY_IF_NUM_ALERTS_IN_TIME_WINDOW: self._escalation_step_notify_if_num_alerts_in_time_window,
+ EscalationPolicy.STEP_NOTIFY_MULTIPLE_USERS: self._escalation_step_notify_multiple_users,
+ EscalationPolicy.STEP_NOTIFY_MULTIPLE_USERS_IMPORTANT: self._escalation_step_notify_multiple_users,
+ None: self._escalation_step_not_configured,
+ }
+ kwargs = {"reason": reason, "alert_group": alert_group}
+ result = action_map[self.step](**kwargs)
+ self.passed_last_time = timezone.now() # used for building escalation plan
+ # if step doesn't have data to return, return default values
+ if result is None:
+ result = self._get_result_tuple()
+ return result
+
+ def _escalation_step_wait(self, alert_group, **kwargs) -> StepExecutionResultData:
+ if self.wait_delay is not None:
+ log_record = AlertGroupLogRecord(
+ type=AlertGroupLogRecord.TYPE_ESCALATION_TRIGGERED,
+ alert_group=alert_group,
+ reason="wait",
+ escalation_policy=self.escalation_policy,
+ escalation_policy_step=self.step,
+ )
+ else:
+ log_record = AlertGroupLogRecord(
+ type=AlertGroupLogRecord.TYPE_ESCALATION_FAILED,
+ alert_group=alert_group,
+ reason="wait",
+ escalation_policy=self.escalation_policy,
+ escalation_error_code=AlertGroupLogRecord.ERROR_ESCALATION_WAIT_STEP_IS_NOT_CONFIGURED,
+ escalation_policy_step=self.step,
+ )
+ wait_delay = self.wait_delay or EscalationPolicy.DEFAULT_WAIT_DELAY
+ eta = timezone.now() + wait_delay
+ log_record.save()
+ return self._get_result_tuple(eta=eta)
+
+ def _escalation_step_notify_all(self, alert_group, **kwargs) -> None:
+ tasks = []
+ notify_all = notify_all_task.signature(
+ args=(alert_group.pk,),
+ kwargs={"escalation_policy_snapshot_order": self.order},
+ immutable=True,
+ )
+ tasks.append(notify_all)
+ self._execute_tasks(tasks)
+
+ def _escalation_step_notify_users_queue(self, alert_group, reason) -> None:
+ tasks = []
+ escalation_policy = self.escalation_policy
+ if len(self.notify_to_users_queue) > 0:
+ next_user = self.next_user_in_sorted_queue
+ self.last_notified_user = next_user
+ if escalation_policy is not None:
+ escalation_policy.last_notified_user = next_user
+ escalation_policy.save(update_fields=["last_notified_user"])
+
+ notify_task = notify_user_task.signature(
+ (
+ next_user.pk,
+ alert_group.pk,
+ ),
+ {
+ "reason": reason,
+ },
+ immutable=True,
+ )
+
+ tasks.append(notify_task)
+ log_record = AlertGroupLogRecord(
+ type=AlertGroupLogRecord.TYPE_ESCALATION_TRIGGERED,
+ author_id=next_user.pk,
+ alert_group=alert_group,
+ reason=reason,
+ escalation_policy=escalation_policy,
+ escalation_policy_step=self.step,
+ )
+ else:
+ log_record = AlertGroupLogRecord(
+ type=AlertGroupLogRecord.TYPE_ESCALATION_FAILED,
+ alert_group=alert_group,
+ escalation_policy=escalation_policy,
+ escalation_error_code=AlertGroupLogRecord.ERROR_ESCALATION_NOTIFY_QUEUE_NO_RECIPIENTS,
+ escalation_policy_step=self.step,
+ )
+ log_record.save()
+ self._execute_tasks(tasks)
+
+ def _escalation_step_notify_multiple_users(self, alert_group, reason) -> None:
+ tasks = []
+ escalation_policy = self.escalation_policy
+ if len(self.notify_to_users_queue) > 0:
+ log_record = AlertGroupLogRecord(
+ type=AlertGroupLogRecord.TYPE_ESCALATION_TRIGGERED,
+ alert_group=alert_group,
+ reason=reason,
+ escalation_policy=escalation_policy,
+ escalation_policy_step=self.step,
+ )
+
+ for user in self.notify_to_users_queue:
+ notify_task = notify_user_task.signature(
+ (
+ user.pk,
+ alert_group.pk,
+ ),
+ {
+ "reason": reason,
+ "important": self.step == EscalationPolicy.STEP_NOTIFY_MULTIPLE_USERS_IMPORTANT,
+ },
+ immutable=True,
+ )
+
+ tasks.append(notify_task)
+
+ AlertGroupLogRecord(
+ type=AlertGroupLogRecord.TYPE_ESCALATION_TRIGGERED,
+ author=user,
+ alert_group=alert_group,
+ reason=reason,
+ escalation_policy=escalation_policy,
+ escalation_policy_step=self.step,
+ ).save()
+ else:
+ log_record = AlertGroupLogRecord(
+ type=AlertGroupLogRecord.TYPE_ESCALATION_FAILED,
+ alert_group=alert_group,
+ escalation_policy=escalation_policy,
+ escalation_error_code=AlertGroupLogRecord.ERROR_ESCALATION_NOTIFY_MULTIPLE_NO_RECIPIENTS,
+ escalation_policy_step=self.step,
+ )
+ log_record.save()
+ self._execute_tasks(tasks)
+
+ def _escalation_step_notify_on_call_schedule(self, alert_group, reason) -> None:
+ tasks = []
+ escalation_policy = self.escalation_policy
+ on_call_schedule = self.notify_schedule
+ self.notify_to_users_queue = []
+
+ if on_call_schedule is None:
+ log_record = AlertGroupLogRecord(
+ type=AlertGroupLogRecord.TYPE_ESCALATION_FAILED,
+ alert_group=alert_group,
+ escalation_policy=escalation_policy,
+ escalation_error_code=AlertGroupLogRecord.ERROR_ESCALATION_SCHEDULE_DOES_NOT_SELECTED,
+ escalation_policy_step=self.step,
+ )
+ else:
+ notify_to_users_list = list_users_to_notify_from_ical(on_call_schedule)
+ if notify_to_users_list is None:
+ log_record = AlertGroupLogRecord(
+ type=AlertGroupLogRecord.TYPE_ESCALATION_FAILED,
+ alert_group=alert_group,
+ escalation_policy=escalation_policy,
+ escalation_error_code=AlertGroupLogRecord.ERROR_ESCALATION_ICAL_IMPORT_FAILED,
+ escalation_policy_step=self.step,
+ step_specific_info={"schedule_name": on_call_schedule.name},
+ )
+ elif len(notify_to_users_list) == 0:
+ log_record = AlertGroupLogRecord(
+ type=AlertGroupLogRecord.TYPE_ESCALATION_FAILED,
+ alert_group=alert_group,
+ reason=reason,
+ escalation_policy=escalation_policy,
+ escalation_error_code=AlertGroupLogRecord.ERROR_ESCALATION_ICAL_NO_VALID_USERS,
+ escalation_policy_step=self.step,
+ step_specific_info={"schedule_name": on_call_schedule.name},
+ )
+ else:
+ log_record = AlertGroupLogRecord(
+ type=AlertGroupLogRecord.TYPE_ESCALATION_TRIGGERED,
+ alert_group=alert_group,
+ reason=reason,
+ escalation_policy=escalation_policy,
+ escalation_policy_step=self.step,
+ step_specific_info={"schedule_name": on_call_schedule.name},
+ )
+ self.notify_to_users_queue = notify_to_users_list
+
+ for notify_to_user in notify_to_users_list:
+ reason = "user is on duty by schedule ({}) defined in iCal".format(on_call_schedule.name)
+ notify_task = notify_user_task.signature(
+ (
+ notify_to_user.pk,
+ alert_group.pk,
+ ),
+ {
+ "reason": reason,
+ "important": self.step == EscalationPolicy.STEP_NOTIFY_SCHEDULE_IMPORTANT,
+ },
+ immutable=True,
+ )
+
+ tasks.append(notify_task)
+
+ AlertGroupLogRecord(
+ type=AlertGroupLogRecord.TYPE_ESCALATION_TRIGGERED,
+ author=notify_to_user,
+ alert_group=alert_group,
+ reason=reason,
+ escalation_policy=escalation_policy,
+ escalation_policy_step=self.step,
+ ).save()
+ log_record.save()
+ self._execute_tasks(tasks)
+
+ def _escalation_step_notify_user_group(self, alert_group, reason) -> None:
+ tasks = []
+ self.notify_to_users_queue = []
+
+ if self.notify_to_group is None:
+ log_record = AlertGroupLogRecord(
+ type=AlertGroupLogRecord.TYPE_ESCALATION_FAILED,
+ alert_group=alert_group,
+ reason=reason,
+ escalation_policy=self.escalation_policy,
+ escalation_error_code=AlertGroupLogRecord.ERROR_ESCALATION_NOTIFY_GROUP_STEP_IS_NOT_CONFIGURED,
+ escalation_policy_step=self.step,
+ )
+ log_record.save()
+ else:
+ notify_group = notify_group_task.signature(
+ args=(alert_group.pk,),
+ kwargs={
+ "escalation_policy_snapshot_order": self.order,
+ },
+ immutable=True,
+ )
+ tasks.append(notify_group)
+ self._execute_tasks(tasks)
+
+ def _escalation_step_notify_if_time(self, alert_group, **kwargs) -> StepExecutionResultData:
+ eta = None
+
+ if self.from_time is None or self.to_time is None:
+ log_record = AlertGroupLogRecord(
+ type=AlertGroupLogRecord.TYPE_ESCALATION_FAILED,
+ alert_group=alert_group,
+ escalation_policy=self.escalation_policy,
+ escalation_error_code=AlertGroupLogRecord.ERROR_ESCALATION_NOTIFY_IF_TIME_IS_NOT_CONFIGURED,
+ escalation_policy_step=self.step,
+ )
+ else:
+ eta = eta_for_escalation_step_notify_if_time(self.from_time, self.to_time)
+ log_record = AlertGroupLogRecord(
+ type=AlertGroupLogRecord.TYPE_ESCALATION_TRIGGERED,
+ author=None,
+ alert_group=alert_group,
+ reason="notify if time",
+ eta=eta,
+ escalation_policy=self.escalation_policy,
+ escalation_policy_step=self.step,
+ )
+
+ log_record.save()
+ return self._get_result_tuple(eta=eta)
+
+ def _escalation_step_notify_if_num_alerts_in_time_window(self, alert_group, **kwargs):
+ # check if current escalation policy is configured properly, otherwise create an error log
+ if self.num_alerts_in_window is None or self.num_minutes_in_window is None:
+ AlertGroupLogRecord.objects.create(
+ type=AlertGroupLogRecord.TYPE_ESCALATION_FAILED,
+ alert_group=alert_group,
+ escalation_policy=self.escalation_policy,
+ escalation_error_code=AlertGroupLogRecord.ERROR_ESCALATION_NOTIFY_IF_NUM_ALERTS_IN_WINDOW_STEP_IS_NOT_CONFIGURED,
+ escalation_policy_step=self.step,
+ )
+ return
+
+ # create a log record only when escalation is paused for the first time
+ if not self.pause_escalation:
+ AlertGroupLogRecord.objects.create(
+ type=AlertGroupLogRecord.TYPE_ESCALATION_TRIGGERED,
+ author=None,
+ alert_group=alert_group,
+ reason="continue escalation if >X alerts per Y minutes",
+ escalation_policy=self.escalation_policy,
+ escalation_policy_step=self.step,
+ )
+
+ last_alert = alert_group.alerts.last()
+        # the policy may have been edited or deleted since the snapshot was taken, so use the snapshot fields
+        time_delta = timezone.timedelta(minutes=self.num_minutes_in_window)
+        num_alerts_in_window = alert_group.alerts.filter(created_at__gte=last_alert.created_at - time_delta).count()
+
+        # pause escalation if there are not enough alerts in the time window
+        if num_alerts_in_window <= self.num_alerts_in_window:
+ self.pause_escalation = True
+ return self._get_result_tuple(pause_escalation=True)
+
+ def _escalation_step_trigger_custom_button(self, alert_group, **kwargs) -> None:
+ tasks = []
+ custom_button = self.custom_button_trigger
+ if custom_button is not None:
+ custom_button_task = custom_button_result.signature(
+ (custom_button.pk, alert_group.pk),
+ {
+ "escalation_policy_pk": self.id,
+ },
+ immutable=True,
+ )
+ tasks.append(custom_button_task)
+ else:
+ log_record = AlertGroupLogRecord(
+ type=AlertGroupLogRecord.TYPE_ESCALATION_FAILED,
+ alert_group=alert_group,
+ escalation_policy=self.escalation_policy,
+ escalation_error_code=AlertGroupLogRecord.ERROR_ESCALATION_TRIGGER_CUSTOM_BUTTON_STEP_IS_NOT_CONFIGURED,
+ escalation_policy_step=self.step,
+ )
+ log_record.save()
+ self._execute_tasks(tasks)
+
+ def _escalation_step_repeat_escalation_n_times(self, alert_group, **kwargs) -> StepExecutionResultData:
+ if self.escalation_counter < EscalationPolicy.MAX_TIMES_REPEAT:
+ log_record = AlertGroupLogRecord(
+ type=AlertGroupLogRecord.TYPE_ESCALATION_TRIGGERED,
+ author=None,
+ alert_group=alert_group,
+ reason="repeat escalation",
+ escalation_policy=self.escalation_policy,
+ escalation_policy_step=self.step,
+ )
+ log_record.save()
+ self.escalation_counter += 1
+ return self._get_result_tuple(start_from_beginning=True)
+
+ def _escalation_step_resolve(self, alert_group, **kwargs) -> StepExecutionResultData:
+ tasks = []
+ log_record = AlertGroupLogRecord(
+ type=AlertGroupLogRecord.TYPE_ESCALATION_TRIGGERED,
+ author=None,
+ alert_group=alert_group,
+ reason="final resolve",
+ escalation_policy=self.escalation_policy,
+ escalation_policy_step=self.step,
+ )
+ log_record.save()
+ resolve_by_last_step = resolve_by_last_step_task.signature((alert_group.pk,), immutable=True)
+ tasks.append(resolve_by_last_step)
+ self._execute_tasks(tasks)
+ return self._get_result_tuple(stop_escalation=True)
+
+ def _escalation_step_not_configured(self, alert_group, **kwargs) -> None:
+ log_record = AlertGroupLogRecord(
+ type=AlertGroupLogRecord.TYPE_ESCALATION_FAILED,
+ alert_group=alert_group,
+ escalation_policy=self.escalation_policy,
+ escalation_error_code=AlertGroupLogRecord.ERROR_ESCALATION_UNSPECIFIED_STEP,
+ )
+ log_record.save()
+
+ def _execute_tasks(self, tasks) -> None:
+ def _apply_tasks():
+ for task in tasks:
+ task.apply_async()
+
+ transaction.on_commit(_apply_tasks)
+
+ def _get_result_tuple(
+ self, eta=None, stop_escalation=False, start_from_beginning=False, pause_escalation=False
+ ) -> StepExecutionResultData:
+        # use the default delay for eta if the step did not compute one
+ eta = eta or timezone.now() + timezone.timedelta(seconds=NEXT_ESCALATION_DELAY)
+ return self.StepExecutionResultData(eta, stop_escalation, start_from_beginning, pause_escalation)
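
A hedged sketch (not in the patch) of how a caller drives a single step and reads the result; alert_group is assumed to be an existing AlertGroup, and the constructor values are illustrative:

from django.utils import timezone

step_snapshot = EscalationPolicySnapshot(
    id=1,
    order=0,
    step=EscalationPolicy.STEP_WAIT,
    wait_delay=timezone.timedelta(minutes=5),
    notify_to_users_queue=[],
    last_notified_user=None,
    from_time=None,
    to_time=None,
    num_alerts_in_window=None,
    num_minutes_in_window=None,
    custom_button_trigger=None,
    notify_schedule=None,
    notify_to_group=None,
    escalation_counter=0,
    passed_last_time=None,
    pause_escalation=False,
)

result = step_snapshot.execute(alert_group, reason="escalation started")
# result is a StepExecutionResultData namedtuple:
#   result.eta                   -> when the next escalation task should run
#   result.stop_escalation       -> True only after STEP_FINAL_RESOLVE
#   result.start_from_beginning  -> True after STEP_REPEAT_ESCALATION_N_TIMES
#   result.pause_escalation      -> True while the alert-count window is not exceeded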
diff --git a/engine/apps/alerts/escalation_snapshot/snapshot_classes/escalation_snapshot.py b/engine/apps/alerts/escalation_snapshot/snapshot_classes/escalation_snapshot.py
new file mode 100644
index 0000000000..d4845c57d1
--- /dev/null
+++ b/engine/apps/alerts/escalation_snapshot/snapshot_classes/escalation_snapshot.py
@@ -0,0 +1,131 @@
+import logging
+from typing import Optional
+
+from celery.utils.log import get_task_logger
+
+from apps.alerts.escalation_snapshot.serializers import EscalationSnapshotSerializer
+from apps.alerts.escalation_snapshot.snapshot_classes.escalation_policy_snapshot import EscalationPolicySnapshot
+from apps.alerts.models.alert_group_log_record import AlertGroupLogRecord
+
+logger = get_task_logger(__name__)
+logger.setLevel(logging.DEBUG)
+
+
+class EscalationSnapshot:
+ __slots__ = (
+ "alert_group",
+ "channel_filter_snapshot",
+ "escalation_chain_snapshot",
+ "escalation_policies_snapshots",
+ "last_active_escalation_policy_order",
+ "slack_channel_id",
+ "next_step_eta",
+ "stop_escalation",
+ "pause_escalation",
+ )
+
+ serializer = EscalationSnapshotSerializer
+
+ def __init__(
+ self,
+ alert_group,
+ channel_filter_snapshot,
+ escalation_chain_snapshot,
+ last_active_escalation_policy_order,
+ escalation_policies_snapshots,
+ slack_channel_id,
+ pause_escalation,
+ next_step_eta,
+ ):
+ self.alert_group = alert_group
+ self.channel_filter_snapshot = channel_filter_snapshot # ChannelFilterSnapshot object
+ self.escalation_chain_snapshot = escalation_chain_snapshot # EscalationChainSnapshot object
+ self.last_active_escalation_policy_order = last_active_escalation_policy_order
+ self.escalation_policies_snapshots = escalation_policies_snapshots # list of EscalationPolicySnapshot objects
+ self.slack_channel_id = slack_channel_id
+ self.pause_escalation = pause_escalation
+ self.next_step_eta = next_step_eta
+ self.stop_escalation = False
+
+ @property
+ def last_active_escalation_policy_snapshot(self) -> Optional[EscalationPolicySnapshot]:
+ order = self.last_active_escalation_policy_order
+ if order is None:
+ return None
+ return self.escalation_policies_snapshots[order]
+
+ @property
+ def next_active_escalation_policy_snapshot(self) -> Optional[EscalationPolicySnapshot]:
+ order = self.next_active_escalation_policy_order
+ if len(self.escalation_policies_snapshots) < order + 1:
+ next_link = None
+ else:
+ next_link = self.escalation_policies_snapshots[order]
+ return next_link
+
+ @property
+ def next_active_escalation_policy_order(self) -> int:
+ if self.last_active_escalation_policy_order is None:
+ next_order = 0
+ else:
+ next_order = self.last_active_escalation_policy_order + 1
+ return next_order
+
+ def save_to_alert_group(self) -> None:
+ self.alert_group.raw_escalation_snapshot = self.convert_to_dict()
+ self.alert_group.save(update_fields=["raw_escalation_snapshot"])
+
+ def convert_to_dict(self) -> dict:
+ return self.serializer(self).data
+
+ def execute_actual_escalation_step(self) -> None:
+ """
+        Executes the current escalation step and saves the result of execution (e.g. the stop_escalation
+        flag and eta) that will be used to schedule the next escalate_alert_group task.
+        Also updates self.last_active_escalation_policy_order if an escalation step was executed.
+        :return: None
+ """
+ escalation_policy_snapshot = self.next_active_escalation_policy_snapshot
+ if escalation_policy_snapshot is None:
+ AlertGroupLogRecord(
+ type=AlertGroupLogRecord.TYPE_ESCALATION_FINISHED,
+ alert_group=self.alert_group,
+ reason="escalation finished",
+ ).save()
+ self.stop_escalation = True
+ logger.debug(
+ "escalation_policy_snapshot is None, stop escalation. Last escalation policy snapshot order "
+ f"{self.last_active_escalation_policy_order}"
+ )
+ else:
+ logger.debug(
+ f"Starting to execute escalation step {escalation_policy_snapshot.step_display} with order "
+ f"{escalation_policy_snapshot.order}"
+ )
+
+ reason = f"lifecycle rule for {self.channel_filter_snapshot.str_for_clients} route"
+
+            # get the execution result as a namedtuple and save its data, e.g.
+            # StepExecutionResultData(eta=None, stop_escalation=False, start_from_beginning=False, pause_escalation=False)
+ execution_result = escalation_policy_snapshot.execute(alert_group=self.alert_group, reason=reason)
+
+ self.next_step_eta = execution_result.eta
+ self.stop_escalation = execution_result.stop_escalation # result of STEP_FINAL_RESOLVE
+ self.pause_escalation = execution_result.pause_escalation # result of STEP_NOTIFY_IF_NUM_ALERTS_IN_WINDOW
+
+ last_active_escalation_policy_order = escalation_policy_snapshot.order
+
+ if execution_result.start_from_beginning: # result of STEP_REPEAT_ESCALATION_N_TIMES
+ last_active_escalation_policy_order = None
+
+ # do not advance to the next escalation policy if escalation is paused
+ if execution_result.pause_escalation:
+ last_active_escalation_policy_order = self.last_active_escalation_policy_order
+
+ self.last_active_escalation_policy_order = last_active_escalation_policy_order
+
+ logger.debug(
+ f"Finished to execute escalation step {escalation_policy_snapshot.step_display} with order "
+ f"{escalation_policy_snapshot.order}, next escalation policy snapshot order "
+ f"{self.next_active_escalation_policy_order}"
+ )
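
A rough sketch (an assumption, not shown in this file) of the task-side loop that drives the snapshot; escalate_alert_group and alert_group.escalation_snapshot are assumed names for the Celery task and the deserialized snapshot:

snapshot = alert_group.escalation_snapshot  # EscalationSnapshot built from raw_escalation_snapshot
snapshot.execute_actual_escalation_step()
snapshot.save_to_alert_group()  # persist the updated order / eta back to the JSON field

if not snapshot.stop_escalation:
    # pause_escalation keeps the same policy order, so the step is re-evaluated on the next run
    escalate_alert_group.apply_async((alert_group.pk,), eta=snapshot.next_step_eta)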
diff --git a/engine/apps/alerts/escalation_snapshot/utils.py b/engine/apps/alerts/escalation_snapshot/utils.py
new file mode 100644
index 0000000000..d72adde40e
--- /dev/null
+++ b/engine/apps/alerts/escalation_snapshot/utils.py
@@ -0,0 +1,29 @@
+import pytz
+from django.utils import timezone
+
+
+def eta_for_escalation_step_notify_if_time(from_time, to_time, current_time=None) -> timezone.datetime:
+ """
+    Computes the eta for STEP_NOTIFY_IF_TIME
+ :return: timezone.datetime
+ """
+ eta = current_time
+ current_time = current_time or timezone.now()
+ if from_time < to_time:
+ if from_time > current_time.time():
+ eta = timezone.datetime.combine(current_time.date(), from_time).astimezone(pytz.UTC)
+ elif current_time.time() >= to_time:
+ eta = timezone.datetime.combine((current_time + timezone.timedelta(days=1)).date(), from_time).astimezone(
+ pytz.UTC
+ )
+ elif from_time > to_time:
+ if from_time > current_time.time() >= to_time:
+ eta = timezone.datetime.combine(current_time.date(), from_time).astimezone(pytz.UTC)
+ elif from_time == to_time:
+ if from_time > current_time.time():
+ eta = timezone.datetime.combine(current_time.date(), from_time).astimezone(pytz.UTC)
+ elif current_time.time() > to_time:
+ eta = timezone.datetime.combine((current_time + timezone.timedelta(days=1)).date(), from_time).astimezone(
+ pytz.UTC
+ )
+ return eta
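
A worked example of the three window cases, assuming the process runs in UTC (times are illustrative):

import datetime

import pytz
from django.utils import timezone

from_time, to_time = datetime.time(9, 0), datetime.time(18, 0)

now = timezone.datetime(2022, 6, 3, 12, 0, tzinfo=pytz.UTC)  # inside 09:00-18:00
eta_for_escalation_step_notify_if_time(from_time, to_time, now)  # -> now, escalate immediately

now = timezone.datetime(2022, 6, 3, 6, 0, tzinfo=pytz.UTC)  # before the window
eta_for_escalation_step_notify_if_time(from_time, to_time, now)  # -> 2022-06-03 09:00 UTC

now = timezone.datetime(2022, 6, 3, 20, 0, tzinfo=pytz.UTC)  # after the window
eta_for_escalation_step_notify_if_time(from_time, to_time, now)  # -> 2022-06-04 09:00 UTC

# an overnight window (from_time > to_time), e.g. 22:00-06:00, delays only while
# the current time falls in the gap between to_time and from_time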
diff --git a/engine/apps/alerts/grafana_alerting_sync_manager/__init__.py b/engine/apps/alerts/grafana_alerting_sync_manager/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/engine/apps/alerts/grafana_alerting_sync_manager/grafana_alerting_sync.py b/engine/apps/alerts/grafana_alerting_sync_manager/grafana_alerting_sync.py
new file mode 100644
index 0000000000..7bfcbdefce
--- /dev/null
+++ b/engine/apps/alerts/grafana_alerting_sync_manager/grafana_alerting_sync.py
@@ -0,0 +1,421 @@
+import copy
+import logging
+from typing import Optional
+
+from django.apps import apps
+from rest_framework import status
+
+from apps.alerts.tasks import create_contact_points_for_datasource
+from apps.grafana_plugin.helpers import GrafanaAPIClient
+
+logger = logging.getLogger(__name__)
+
+
+class GrafanaAlertingSyncManager:
+ """
+ Create or update Grafana Alerting contact points and notification policies for INTEGRATION_GRAFANA_ALERTING
+ by updating Grafana Alerting config for each datasource with type 'alertmanager'
+ """
+
+ GRAFANA_CONTACT_POINT = "grafana"
+ ALERTING_DATASOURCE = "alertmanager"
+
+ def __init__(self, alert_receive_channel):
+ self.alert_receive_channel = alert_receive_channel
+ self.client = GrafanaAPIClient(
+ api_url=self.alert_receive_channel.organization.grafana_url,
+ api_token=self.alert_receive_channel.organization.api_token,
+ )
+
+ @classmethod
+ def check_for_connection_errors(cls, organization) -> Optional[str]:
+ """Check if it possible to connect to alerting, otherwise return error message"""
+ client = GrafanaAPIClient(api_url=organization.grafana_url, api_token=organization.api_token)
+ recipient = cls.GRAFANA_CONTACT_POINT
+ config, response_info = client.get_alerting_config(recipient)
+ if config is None:
+ logger.warning(
+ f"Failed to connect to contact point (GET): Is unified alerting enabled on instance? {response_info}"
+ )
+ return (
+ "Failed to create the integration with current Grafana Alerting. "
+ "Please reach out to our support team"
+ )
+
+ datasource_list, response_info = client.get_datasources()
+ if datasource_list is None:
+ logger.warning(
+ f"Failed to connect to alerting datasource (GET): Is unified alerting enabled "
+ f"on instance? {response_info}"
+ )
+ return (
+ "Failed to create the integration with current Grafana Alerting. "
+ "Please reach out to our support team"
+ )
+ return
+
+ def create_contact_points(self) -> None:
+ """
+ Get all alertmanager datasources and try to create contact points for them.
+        Start an async task to create the contact points that could not be created.
+        If all contact points were created, set the channel flag 'is_finished_alerting_setup' to True.
+ """
+ # create contact point for grafana alertmanager
+ # in this case we don't have datasource data
+ self.create_contact_point()
+ # try to create other contact points
+ datasources, response_info = self.client.get_datasources()
+ if datasources is None:
+ logger.warning(
+ f"Failed to get datasource list for organization {self.alert_receive_channel.organization.org_title}, "
+ f"{response_info}"
+ )
+ return
+
+        # list of datasources for which contact point creation failed
+        datasources_to_create = []
+        # sync the other datasources
+ for datasource in datasources:
+ if datasource["type"] == GrafanaAlertingSyncManager.ALERTING_DATASOURCE:
+ if self.create_contact_point(datasource) is None:
+                    # Failed to create the contact point due to receiving an invalid alerting config. This is expected behaviour.
+                    # Add the datasource to the list and retry creating its contact point asynchronously
+ datasources_to_create.append(datasource)
+
+ if datasources_to_create:
+ # create other contact points async
+ create_contact_points_for_datasource.apply_async(
+ (self.alert_receive_channel.pk, datasources_to_create),
+ )
+ else:
+ self.alert_receive_channel.is_finished_alerting_setup = True
+ self.alert_receive_channel.save(update_fields=["is_finished_alerting_setup"])
+
+ def create_contact_point(self, datasource=None) -> Optional["apps.alerts.models.GrafanaAlertingContactPoint"]:
+ """
+        Try to create a contact point for the datasource.
+        Return None if the contact point was not created, otherwise return the contact point object
+ """
+ if datasource is None:
+ datasource = {}
+
+ datasource_id_or_grafana = datasource.get("id") or GrafanaAlertingSyncManager.GRAFANA_CONTACT_POINT
+ datasource_type = datasource.get("type") or GrafanaAlertingSyncManager.GRAFANA_CONTACT_POINT
+ is_grafana_datasource = datasource.get("id") is None
+ logger.info(
+ f"Create contact point for {datasource_type} datasource, integration {self.alert_receive_channel.pk}"
+ )
+ config, response_info = self.client.get_alerting_config(datasource_id_or_grafana)
+
+ if config is None:
+ logger.warning(
+ f"Failed to create contact point (GET) for integration {self.alert_receive_channel.pk}: "
+ f"Is unified alerting enabled on instance? {response_info}"
+ )
+ return
+
+ updated_config = copy.deepcopy(config)
+
+ if config["alertmanager_config"] is None:
+ default_config, response_info = self.client.get_alertmanager_status_with_config(datasource_id_or_grafana)
+ if default_config is None:
+ logger.warning(
+ f"Failed to create contact point (alertmanager_config is None) for integration "
+ f"{self.alert_receive_channel.pk}, {response_info}"
+ )
+ return
+ updated_config = {"alertmanager_config": copy.deepcopy(default_config["config"])}
+
+ receiver_name = self.alert_receive_channel.emojized_verbal_name
+
+ routes = updated_config["alertmanager_config"]["route"].get("routes", [])
+ new_route = GrafanaAlertingSyncManager._get_continue_route_config_for_datasource(
+ is_grafana_datasource,
+ receiver_name,
+ )
+        # Prepend the new route to the list of routes
+        # It must have the `continue=True` parameter, otherwise it would intercept all the alerts
+ updated_config["alertmanager_config"]["route"]["routes"] = [new_route] + routes
+
+ receivers = updated_config["alertmanager_config"]["receivers"]
+ new_receiver = GrafanaAlertingSyncManager._get_receiver_config_for_datasource(
+ is_grafana_datasource,
+ receiver_name,
+ self.alert_receive_channel.integration_url,
+ )
+ updated_config["alertmanager_config"]["receivers"] = receivers + [new_receiver]
+
+ response, response_info = self.client.update_alerting_config(updated_config, datasource_id_or_grafana)
+ if response is None:
+ logger.warning(
+ f"Failed to create contact point for integration {self.alert_receive_channel.pk} (POST): {response_info}"
+ )
+ if response_info.get("status_code") == status.HTTP_400_BAD_REQUEST:
+ logger.warning(f"Config: {config}\nUpdated config: {updated_config}")
+ return
+
+ config, response_info = self.client.get_alerting_config(datasource_id_or_grafana)
+ contact_point = self._create_contact_point_from_payload(config, receiver_name, datasource)
+ contact_point_created_text = "created" if contact_point else "not created, creation will be retried"
+ logger.info(
+ f"Finished creating contact point for {datasource_type} datasource, "
+ f"integration {self.alert_receive_channel.pk}, contact point was {contact_point_created_text}"
+ )
+ return contact_point
+
+ @staticmethod
+ def _get_continue_route_config_for_datasource(is_grafana_datasource, receiver_name) -> dict:
+ """Return route config, related on type of datasource"""
+
+ if is_grafana_datasource:
+ route = {
+ "receiver": receiver_name,
+ "continue": True,
+ }
+ else:
+ route = {
+ "continue": True,
+ "group_by": [],
+ "matchers": [],
+ "receiver": receiver_name,
+ "routes": [],
+ }
+ return route
+
+ @staticmethod
+ def _get_receiver_config_for_datasource(is_grafana_datasource, receiver_name, webhook_url) -> dict:
+ """Return receiver config, related on type of datasource"""
+
+ if is_grafana_datasource:
+ receiver = {
+ "name": receiver_name,
+ "grafana_managed_receiver_configs": [
+ {
+ "name": receiver_name,
+ "type": "webhook",
+ "disableResolveMessage": False,
+ "settings": {
+ "httpMethod": "POST",
+ "url": webhook_url,
+ },
+ "secureFields": {},
+ }
+ ],
+ }
+ else:
+ receiver = {
+ "name": receiver_name,
+ "webhook_configs": [
+ {
+ "send_resolved": True,
+ "url": webhook_url,
+ }
+ ],
+ }
+ return receiver
+
+ def _create_contact_point_from_payload(
+ self,
+ payload,
+ receiver_name,
+ datasource,
+ ) -> "apps.alerts.models.GrafanaAlertingContactPoint":
+ """Get receiver data from payload and create contact point"""
+
+ is_grafana_datasource = datasource.get("id") is None
+
+ receiver_config = self._get_receiver_config(receiver_name, is_grafana_datasource, payload)
+
+ GrafanaAlertingContactPoint = apps.get_model("alerts", "GrafanaAlertingContactPoint")
+ contact_point = GrafanaAlertingContactPoint(
+ alert_receive_channel=self.alert_receive_channel,
+ name=receiver_config["name"],
+ uid=receiver_config.get("uid"), # uid is None for non-Grafana datasource
+ datasource_name=datasource.get("name") or GrafanaAlertingSyncManager.GRAFANA_CONTACT_POINT,
+ datasource_id=datasource.get("id"), # id is None for Grafana datasource
+ )
+ contact_point.save()
+ return contact_point
+
+ def _get_receiver_config(self, receiver_name, is_grafana_datasource, payload):
+ receiver_config = {}
+ receivers = payload["alertmanager_config"]["receivers"]
+ alerting_receiver = GrafanaAlertingSyncManager._get_receiver_by_name(receiver_name, receivers)
+
+ if is_grafana_datasource: # means that datasource is Grafana
+ for config in alerting_receiver["grafana_managed_receiver_configs"]:
+ if config["name"] == receiver_name:
+ receiver_config = config
+ break
+ else: # other datasource
+ for config in alerting_receiver.get("webhook_configs", []):
+ if config["url"] == self.alert_receive_channel.integration_url:
+ receiver_config = alerting_receiver
+ break
+ return receiver_config
+
+ @staticmethod
+ def _get_receiver_by_name(receiver_name, receivers):
+ for alerting_receiver in receivers:
+ if alerting_receiver["name"] == receiver_name:
+ return alerting_receiver
+
+ def sync_each_contact_point(self) -> None:
+ """Sync all channels contact points"""
+ logger.info(f"Starting to sync contact point for integration {self.alert_receive_channel.pk}")
+ contact_points = self.alert_receive_channel.contact_points.all()
+ for contact_point in contact_points:
+ self.sync_contact_point(contact_point)
+
+ def sync_contact_point(self, contact_point) -> None:
+ """Update name of contact point and related routes or delete it if integration was deleted"""
+ datasource_id = contact_point.datasource_id or GrafanaAlertingSyncManager.GRAFANA_CONTACT_POINT
+ datasource_type = "grafana" if not contact_point.datasource_id else "nongrafana"
+ logger.info(
+ f"Sync contact point for {datasource_type} (name: {contact_point.datasource_name}) datasource, integration "
+ f"{self.alert_receive_channel.pk}"
+ )
+
+ config, response_info = self.client.get_alerting_config(datasource_id)
+ if config is None:
+ logger.warning(
+ f"Failed to update contact point (GET) for integration {self.alert_receive_channel.pk}: Is unified "
+ f"alerting enabled on instance? {response_info}"
+ )
+ return
+
+ receivers = config["alertmanager_config"]["receivers"]
+ name_in_alerting = self.find_name_of_contact_point(
+ contact_point.uid,
+ datasource_id,
+ receivers,
+ )
+
+ updated_config = copy.deepcopy(config)
+ # if integration exists, update name for contact point and related routes
+ if self.alert_receive_channel.deleted_at is None:
+ new_name = self.alert_receive_channel.emojized_verbal_name
+ updated_config = GrafanaAlertingSyncManager._update_contact_point_name_in_config(
+ updated_config,
+ name_in_alerting,
+ new_name,
+ )
+ contact_point.name = new_name
+ if datasource_id != GrafanaAlertingSyncManager.GRAFANA_CONTACT_POINT:
+ datasource_name = self.get_datasource_name(datasource_id)
+ contact_point.datasource_name = datasource_name
+ contact_point.save(update_fields=["name", "datasource_name"])
+ # if integration was deleted, delete contact point and related routes
+ else:
+ updated_config = GrafanaAlertingSyncManager._remove_contact_point_from_config(
+ updated_config,
+ name_in_alerting,
+ )
+
+ response, response_info = self.client.update_alerting_config(updated_config, datasource_id)
+ if response is None:
+ logger.warning(
+ f"Failed to update contact point for integration {self.alert_receive_channel.pk} "
+ f"(POST): {response_info}"
+ )
+ return
+
+ if self.alert_receive_channel.deleted_at:
+ contact_point.delete()
+
+ logger.info(
+ f"Finish to sync contact point for {datasource_type} (name: {contact_point.datasource_name}) datasource, "
+ f"integration {self.alert_receive_channel.pk}"
+ )
+
+ @classmethod
+ def _update_contact_point_name_in_config(cls, config, name_in_alerting, new_name) -> dict:
+ receivers = config["alertmanager_config"]["receivers"]
+ route = config["alertmanager_config"]["route"]
+
+ config["alertmanager_config"]["route"] = cls._recursive_rename_routes(route, name_in_alerting, new_name)
+
+ for receiver in receivers:
+ if receiver["name"] == name_in_alerting:
+ receiver["name"] = new_name
+ receiver_configs = receiver.get("grafana_managed_receiver_configs", [])
+ for receiver_config in receiver_configs:
+ if receiver_config["name"] == name_in_alerting:
+ receiver_config["name"] = new_name
+ return config
+
+ @classmethod
+ def _recursive_rename_routes(cls, alerting_route, name_in_alerting, new_name) -> dict:
+ routes = alerting_route.get("routes", [])
+ for route in routes:
+ if route["receiver"] == name_in_alerting:
+ route["receiver"] = new_name
+
+ for idx, nested_route in enumerate(routes):
+ if nested_route.get("routes"):
+ alerting_route["routes"][idx] = cls._recursive_rename_routes(nested_route, name_in_alerting, new_name)
+
+ return alerting_route
+
+ @classmethod
+ def _remove_contact_point_from_config(cls, config, name_in_alerting) -> dict:
+ receivers = config["alertmanager_config"]["receivers"]
+ route = config["alertmanager_config"]["route"]
+
+ config["alertmanager_config"]["route"] = cls._recursive_remove_routes(route, name_in_alerting)
+
+ updated_receivers = [receiver for receiver in receivers if receiver["name"] != name_in_alerting]
+ config["alertmanager_config"]["receivers"] = updated_receivers
+
+ return config
+
+ @classmethod
+ def _recursive_remove_routes(cls, alerting_route, name_in_alerting) -> dict:
+ routes = alerting_route.get("routes", [])
+ alerting_route["routes"] = [route for route in routes if route["receiver"] != name_in_alerting]
+
+ for idx, nested_route in enumerate(alerting_route["routes"]):
+ if nested_route.get("routes"):
+ alerting_route["routes"][idx] = cls._recursive_remove_routes(nested_route, name_in_alerting)
+
+ return alerting_route
+
+ def find_name_of_contact_point(self, contact_point_uid, datasource_id, receivers) -> str:
+ if datasource_id == GrafanaAlertingSyncManager.GRAFANA_CONTACT_POINT:
+ name_in_alerting = self._find_name_of_contact_point_by_uid(contact_point_uid, receivers)
+ else:
+ name_in_alerting = self._find_name_of_contact_point_by_integration_url(receivers)
+ return name_in_alerting
+
+ def _find_name_of_contact_point_by_uid(self, contact_point_uid, receivers) -> str:
+ """Find name of contact point for grafana datasource"""
+ name_in_alerting = None
+ # find name of contact point in alerting config by contact point uid
+ for receiver in receivers:
+ receiver_configs = receiver["grafana_managed_receiver_configs"]
+ for receiver_config in receiver_configs:
+ if receiver_config["uid"] == contact_point_uid:
+ name_in_alerting = receiver_config["name"]
+ break
+ if name_in_alerting:
+ break
+ return name_in_alerting
+
+ def _find_name_of_contact_point_by_integration_url(self, receivers) -> str:
+ """Find name of contact point for nongrafana datasource"""
+ name_in_alerting = None
+ integration_url = self.alert_receive_channel.integration_url
+        # find name of contact point in alerting config by integration url
+ for receiver in receivers:
+ webhook_configs = receiver.get("webhook_configs", [])
+ for webhook_config in webhook_configs:
+ if webhook_config["url"] == integration_url:
+ name_in_alerting = receiver["name"]
+ break
+ if name_in_alerting:
+ break
+ return name_in_alerting
+
+ def get_datasource_name(self, datasource_id) -> str:
+ datasource, _ = self.client.get_datasource(datasource_id)
+ return datasource["name"]
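
For reference, a sketch (illustrative receiver name and URL) of the shape of the Grafana-managed alertmanager config after create_contact_point(): the new route is prepended with "continue": True so it does not swallow alerts from the pre-existing routes, and a matching webhook receiver is appended:

updated_config = {
    "alertmanager_config": {
        "route": {
            "receiver": "default",
            "routes": [
                {"receiver": "OnCall integration", "continue": True},  # prepended by OnCall
                # ... pre-existing routes keep their positions below ...
            ],
        },
        "receivers": [
            # ... pre-existing receivers ...
            {
                "name": "OnCall integration",
                "grafana_managed_receiver_configs": [
                    {
                        "name": "OnCall integration",
                        "type": "webhook",
                        "disableResolveMessage": False,
                        "settings": {"httpMethod": "POST", "url": "<integration_url>"},
                        "secureFields": {},
                    }
                ],
            },
        ],
    }
}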
diff --git a/engine/apps/alerts/incident_appearance/__init__.py b/engine/apps/alerts/incident_appearance/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/engine/apps/alerts/incident_appearance/renderers/__init__.py b/engine/apps/alerts/incident_appearance/renderers/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/engine/apps/alerts/incident_appearance/renderers/base_renderer.py b/engine/apps/alerts/incident_appearance/renderers/base_renderer.py
new file mode 100644
index 0000000000..234c80383a
--- /dev/null
+++ b/engine/apps/alerts/incident_appearance/renderers/base_renderer.py
@@ -0,0 +1,28 @@
+from abc import ABC, abstractmethod
+
+from django.utils.functional import cached_property
+
+
+class AlertBaseRenderer(ABC):
+ def __init__(self, alert):
+ self.alert = alert
+
+ @cached_property
+ def templated_alert(self):
+ return self.templater_class(self.alert).render()
+
+ @property
+ @abstractmethod
+ def templater_class(self):
+ raise NotImplementedError
+
+
+class AlertGroupBaseRenderer(ABC):
+ def __init__(self, alert_group):
+ self.alert_group = alert_group
+ self.alert_renderer = self.alert_renderer_class(self.alert_group.alerts.first())
+
+ @property
+ @abstractmethod
+ def alert_renderer_class(self):
+ raise NotImplementedError
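
A minimal sketch (hypothetical; AlertExampleTemplater is not part of the patch) of the renderer pair that the concrete renderers below implement:

class AlertExampleRenderer(AlertBaseRenderer):
    @property
    def templater_class(self):
        return AlertExampleTemplater  # hypothetical templater class


class AlertGroupExampleRenderer(AlertGroupBaseRenderer):
    @property
    def alert_renderer_class(self):
        return AlertExampleRenderer

    def render(self):
        # templated_alert is a cached_property, so the templater runs only once
        return self.alert_renderer.templated_alert.title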
diff --git a/engine/apps/alerts/incident_appearance/renderers/constants.py b/engine/apps/alerts/incident_appearance/renderers/constants.py
new file mode 100644
index 0000000000..6ccaf77b43
--- /dev/null
+++ b/engine/apps/alerts/incident_appearance/renderers/constants.py
@@ -0,0 +1 @@
+DEFAULT_BACKUP_TITLE = "Incident"
diff --git a/engine/apps/alerts/incident_appearance/renderers/email_renderer.py b/engine/apps/alerts/incident_appearance/renderers/email_renderer.py
new file mode 100644
index 0000000000..5107988b71
--- /dev/null
+++ b/engine/apps/alerts/incident_appearance/renderers/email_renderer.py
@@ -0,0 +1,42 @@
+from django.template.loader import render_to_string
+
+from apps.alerts.incident_appearance.renderers.base_renderer import AlertBaseRenderer, AlertGroupBaseRenderer
+from apps.alerts.incident_appearance.renderers.constants import DEFAULT_BACKUP_TITLE
+from apps.alerts.incident_appearance.templaters import AlertEmailTemplater
+from common.utils import str_or_backup
+
+
+class AlertEmailRenderer(AlertBaseRenderer):
+ @property
+ def templater_class(self):
+ return AlertEmailTemplater
+
+
+class AlertGroupEmailRenderer(AlertGroupBaseRenderer):
+ @property
+ def alert_renderer_class(self):
+ return AlertEmailRenderer
+
+ def render(self, limit_notification=False):
+ subject = "You are invited to check an incident from Grafana OnCall"
+ templated_alert = self.alert_renderer.templated_alert
+
+ title_fallback = (
+ f"#{self.alert_group.inside_organization_number} "
+ f"{DEFAULT_BACKUP_TITLE} via {self.alert_group.channel.verbal_name}"
+ )
+
+ content = render_to_string(
+ "email_notification.html",
+ {
+ "url": self.alert_group.permalink or self.alert_group.web_link,
+ "title": str_or_backup(templated_alert.title, title_fallback),
+ "message": str_or_backup(templated_alert.message, ""), # not render message it all if smth go wrong
+ "amixr_team": self.alert_group.channel.organization,
+ "alert_channel": self.alert_group.channel.short_name,
+ "limit_notification": limit_notification,
+ "emails_left": self.alert_group.channel.organization.emails_left,
+ },
+ )
+
+ return subject, content
diff --git a/engine/apps/alerts/incident_appearance/renderers/phone_call_renderer.py b/engine/apps/alerts/incident_appearance/renderers/phone_call_renderer.py
new file mode 100644
index 0000000000..b804aaf825
--- /dev/null
+++ b/engine/apps/alerts/incident_appearance/renderers/phone_call_renderer.py
@@ -0,0 +1,33 @@
+from apps.alerts.incident_appearance.renderers.base_renderer import AlertBaseRenderer, AlertGroupBaseRenderer
+from apps.alerts.incident_appearance.renderers.constants import DEFAULT_BACKUP_TITLE
+from apps.alerts.incident_appearance.templaters import AlertPhoneCallTemplater
+from common.utils import str_or_backup
+
+
+class AlertPhoneCallRenderer(AlertBaseRenderer):
+ @property
+ def templater_class(self):
+ return AlertPhoneCallTemplater
+
+
+class AlertGroupPhoneCallRenderer(AlertGroupBaseRenderer):
+ TEMPLATE = (
+ "You are invited to check an incident from Grafana OnCall. "
+ "Alert via {integration_name} with title {title} triggered {alert_count} times"
+ )
+
+ @property
+ def alert_renderer_class(self):
+ return AlertPhoneCallRenderer
+
+ def render(self):
+ templated_alert = self.alert_renderer.templated_alert
+ title = str_or_backup(templated_alert.title, DEFAULT_BACKUP_TITLE)
+
+ text = self.TEMPLATE.format(
+ integration_name=self.alert_group.channel.short_name,
+ title=title,
+ alert_count=self.alert_group.alerts.count(),
+ )
+
+ return text
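
For illustration, the rendered phone-call text for a made-up alert group (integration short name "My Integration", 3 alerts) would read:

# "You are invited to check an incident from Grafana OnCall. "
# "Alert via My Integration with title High CPU usage triggered 3 times"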
diff --git a/engine/apps/alerts/incident_appearance/renderers/slack_renderer.py b/engine/apps/alerts/incident_appearance/renderers/slack_renderer.py
new file mode 100644
index 0000000000..f36fb2bbf2
--- /dev/null
+++ b/engine/apps/alerts/incident_appearance/renderers/slack_renderer.py
@@ -0,0 +1,391 @@
+import json
+
+from django.apps import apps
+
+from apps.alerts.incident_appearance.renderers.base_renderer import AlertBaseRenderer, AlertGroupBaseRenderer
+from apps.alerts.incident_appearance.templaters import AlertSlackTemplater
+from apps.slack.scenarios.scenario_step import ScenarioStep
+from common.utils import is_string_with_visible_characters, str_or_backup
+
+
+class AlertSlackRenderer(AlertBaseRenderer):
+ def __init__(self, alert):
+ super().__init__(alert)
+ self.channel = alert.group.channel
+
+ @property
+ def templater_class(self):
+ return AlertSlackTemplater
+
+ def render_alert_blocks(self):
+ blocks = []
+
+ blocks.append(
+ {
+ "type": "section",
+ "text": {
+ "type": "mrkdwn",
+ "text": str_or_backup(self.templated_alert.title, "Alert"),
+ },
+ }
+ )
+ if is_string_with_visible_characters(self.templated_alert.message):
+ message = self.templated_alert.message
+ BLOCK_SECTION_TEXT_MAX_SIZE = 2800
+ if len(message) > BLOCK_SECTION_TEXT_MAX_SIZE:
+ message = (
+                    message[: BLOCK_SECTION_TEXT_MAX_SIZE - 3] + "... Message has been trimmed. "
+                    "Check the full content in the web UI"
+ )
+ blocks.append({"type": "section", "text": {"type": "mrkdwn", "text": message}})
+ return blocks
+
+ def render_alert_attachments(self):
+ attachments = []
+ if is_string_with_visible_characters(self.templated_alert.image_url):
+ attachments.append(
+ {
+ "fallback": "{}: {}".format(self.channel.get_integration_display(), self.alert.title),
+ "title": "{} via Grafana OnCall".format(self.channel.get_integration_display()),
+ "title_link": self.templated_alert.source_link,
+ "callback_id": "alert",
+ "text": "",
+ "image_url": self.templated_alert.image_url,
+ }
+ )
+ return attachments
+
+
+class AlertGroupSlackRenderer(AlertGroupBaseRenderer):
+ def __init__(self, alert_group):
+ super().__init__(alert_group)
+
+        # render the last alert's content as the Slack message, so the message is updated when a new alert arrives
+ self.alert_renderer = self.alert_renderer_class(self.alert_group.alerts.last())
+
+ @property
+ def alert_renderer_class(self):
+ return AlertSlackRenderer
+
+ def render_alert_group_blocks(self):
+ non_resolve_alerts_queryset = self.alert_group.alerts.filter(is_resolve_signal=False)
+ if not self.alert_group.channel.organization.slack_team_identity.installed_via_granular_permissions:
+ blocks = [
+ {
+ "type": "section",
+ "text": {
+ "type": "mrkdwn",
+ "text": ":warning: *Action required - reinstall app*\n"
+ "Slack is deprecating current permission model. We will support it till DATE\n" # TODO: deprecation date
+ "Don't worry - we migrate OnCall to new one, but it required to reinstall app."
+ 'Press "Upgrade" button to see more detailed instruction and upgrade.',
+ },
+ },
+ {"type": "divider"},
+ {
+ "type": "actions",
+ "elements": [
+ {
+ "type": "button",
+ "text": {
+ "type": "plain_text",
+ "text": "Upgrade",
+ },
+ "value": "click_me_123",
+ "url": self.alert_group.channel.organization.web_slack_page_link,
+ },
+ ],
+ },
+ ]
+ else:
+ blocks = []
+ if non_resolve_alerts_queryset.count() <= 1:
+ blocks.extend(self.alert_renderer.render_alert_blocks())
+ else:
+ blocks.extend(self._get_alert_group_base_blocks_if_grouped())
+ return blocks
+
+ def render_alert_group_attachments(self):
+ attachments = self.alert_renderer.render_alert_attachments()
+
+ if self.alert_group.root_alert_group is not None:
+ slack_message = self.alert_group.root_alert_group.slack_message
+ root_ag_name = self.alert_group.root_alert_group.long_verbose_name_without_formatting
+ if slack_message:
+ footer_text = f"Attached to *<{slack_message.permalink}|{root_ag_name}>*"
+ else:
+ footer_text = (f"Attached to *{root_ag_name}*",)
+ attachments.extend(
+ [
+ {
+ "fallback": "Subscription...",
+ "footer": footer_text,
+ "color": "danger",
+ "mrkdwn": True,
+ "callback_id": "subscription notification",
+ "actions": [
+ {
+ "name": ScenarioStep.get_step("distribute_alerts", "UnAttachGroupStep").routing_uid(),
+ "text": "Unattach",
+ "type": "button",
+ "value": json.dumps({"organization_id": self.alert_group.channel.organization_id}),
+ }
+ ],
+ }
+ ]
+ )
+ if self.alert_group.root_alert_group.acknowledged:
+ attachments[0]["color"] = "warning"
+ if self.alert_group.root_alert_group.resolved:
+ attachments[0]["color"] = "good"
+ attachments[0]["actions"] = []
+ return attachments
+
+ # Attaching buttons
+ if self.alert_group.wiped_at is None:
+ attachment_alert_buttons = self._get_buttons_attachments()
+ if len(attachment_alert_buttons["blocks"][0]["elements"]) > 0:
+ attachments.append(attachment_alert_buttons)
+
+ # Attaching resolve information
+ if self.alert_group.resolved:
+ resolve_attachment = {
+ "fallback": "Resolved...",
+ "text": self.alert_group.get_resolve_text(mention_user=True),
+ "callback_id": "alert",
+ }
+ attachments.append(resolve_attachment)
+ else:
+ if self.alert_group.acknowledged:
+ ack_attachment = {
+ "fallback": "Acknowledged...",
+ "text": self.alert_group.get_acknowledge_text(mention_user=True),
+ "callback_id": "alert",
+ }
+ attachments.append(ack_attachment)
+
+ # Attaching invitation info
+ if not self.alert_group.resolved:
+ attachments += self._get_invitation_attachment()
+
+ attachments = self._set_attachments_color(attachments)
+ return attachments
+
+ def _set_attachments_color(self, attachments):
+ color = "#a30200" # danger
+ if self.alert_group.silenced:
+ color = "#dddddd" # slack-grey
+ if self.alert_group.acknowledged:
+ color = "#daa038" # warning
+ if self.alert_group.resolved:
+ color = "#2eb886" # good
+ for attachment in attachments:
+ attachment["color"] = color
+ return attachments
+
+ def _get_text_alert_grouped(self):
+ alert_count = self.alert_group.alerts.count()
+ link = self.alert_group.web_link
+
+ text = (
+ f":package: Showing the last alert only out of {alert_count} total. "
+ f"Visit <{link}|the plugin page> to see them all."
+ )
+
+ return text
+
+ def _get_alert_group_base_blocks_if_grouped(self):
+ text = self._get_text_alert_grouped()
+ blocks = self.alert_renderer.render_alert_blocks()
+ blocks.append({"type": "context", "elements": [{"type": "mrkdwn", "text": text}]})
+ return blocks
+
+ def _get_buttons_attachments(self):
+ attachment = {"blocks": self._get_buttons_blocks()}
+ return attachment
+
+ def _get_buttons_blocks(self):
+ AlertGroup = apps.get_model("alerts", "AlertGroup")
+ buttons = []
+ if self.alert_group.maintenance_uuid is None:
+ if not self.alert_group.resolved:
+ if not self.alert_group.acknowledged:
+ buttons.append(
+ {
+ "text": {
+ "type": "plain_text",
+ "text": "Acknowledge",
+ "emoji": True,
+ },
+ "type": "button",
+ "value": json.dumps({"organization_id": self.alert_group.channel.organization_id}),
+ "action_id": ScenarioStep.get_step(
+ "distribute_alerts",
+ "AcknowledgeGroupStep",
+ ).routing_uid(),
+ },
+ )
+ else:
+ buttons.append(
+ {
+ "text": {
+ "type": "plain_text",
+ "text": "Unacknowledge",
+ "emoji": True,
+ },
+ "type": "button",
+ "value": json.dumps({"organization_id": self.alert_group.channel.organization_id}),
+ "action_id": ScenarioStep.get_step(
+ "distribute_alerts",
+ "UnAcknowledgeGroupStep",
+ ).routing_uid(),
+ },
+ )
+ buttons.append(
+ {
+ "text": {"type": "plain_text", "text": "Resolve", "emoji": True},
+ "type": "button",
+ "style": "primary",
+ "value": json.dumps({"organization_id": self.alert_group.channel.organization_id}),
+ "action_id": ScenarioStep.get_step("distribute_alerts", "ResolveGroupStep").routing_uid(),
+ },
+ )
+
+                if self.alert_group.invitations.filter(is_active=True).count() < 5:
+                    slack_team_identity = self.alert_group.channel.organization.slack_team_identity
+                    action_id = ScenarioStep.get_step("distribute_alerts", "InviteOtherPersonToIncident").routing_uid()
+                    text = "Invite..."
+                    invitation_element = ScenarioStep(
+                        slack_team_identity,
+                        self.alert_group.channel.organization,
+                    ).get_select_user_element(action_id, text=text)
+                    buttons.append(invitation_element)
+                if not self.alert_group.acknowledged:
+                    if not self.alert_group.silenced:
+                        silence_options = [
+                            {"text": {"type": "plain_text", "text": text, "emoji": True}, "value": str(value)}
+                            for value, text in AlertGroup.SILENCE_DELAY_OPTIONS
+                        ]
+                        buttons.append(
+                            {
+                                "placeholder": {"type": "plain_text", "text": "Silence", "emoji": True},
+                                "type": "static_select",
+                                "options": silence_options,
+                                "action_id": ScenarioStep.get_step(
+                                    "distribute_alerts", "SilenceGroupStep"
+                                ).routing_uid(),
+                                # "value": json.dumps({"organization_id": self.alert_group.channel.organization_id}),
+                            }
+                        )
+                    else:
+                        buttons.append(
+                            {
+                                "text": {"type": "plain_text", "text": "Unsilence", "emoji": True},
+                                "type": "button",
+                                "value": json.dumps({"organization_id": self.alert_group.channel.organization_id}),
+                                "action_id": ScenarioStep.get_step(
+                                    "distribute_alerts", "UnSilenceGroupStep"
+                                ).routing_uid(),
+                            },
+                        )
+                attach_button = {
+                    "text": {"type": "plain_text", "text": "Attach to ...", "emoji": True},
+                    "type": "button",
+                    "action_id": ScenarioStep.get_step("distribute_alerts", "SelectAttachGroupStep").routing_uid(),
+                    "value": json.dumps(
+                        {
+                            "alert_group_pk": self.alert_group.pk,
+                            "organization_id": self.alert_group.channel.organization_id,
+                        }
+                    ),
+                }
+                buttons.append(attach_button)
+            else:
+                buttons.append(
+                    {
+                        "text": {"type": "plain_text", "text": "Unresolve", "emoji": True},
+                        "type": "button",
+                        "value": json.dumps({"organization_id": self.alert_group.channel.organization_id}),
+                        "action_id": ScenarioStep.get_step("distribute_alerts", "UnResolveGroupStep").routing_uid(),
+                    },
+                )
+
+            if self.alert_group.channel.is_available_for_custom_templates:
+                buttons.append(
+                    {
+                        "text": {"type": "plain_text", "text": ":mag: Format Alert", "emoji": True},
+                        "type": "button",
+                        "value": json.dumps(
+                            {
+                                "alert_group_pk": str(self.alert_group.pk),
+                                "organization_id": self.alert_group.channel.organization_id,
+                            }
+                        ),
+                        "action_id": ScenarioStep.get_step(
+                            "alertgroup_appearance", "OpenAlertAppearanceDialogStep"
+                        ).routing_uid(),
+                    },
+                )
+
+            # Resolution notes button
+            resolution_notes_count = self.alert_group.resolution_notes.count()
+            resolution_notes_button = {
+                "text": {
+                    "type": "plain_text",
+                    "text": "Resolution notes [{}]".format(resolution_notes_count),
+                    "emoji": True,
+                },
+                "type": "button",
+                "action_id": ScenarioStep.get_step("resolution_note", "ResolutionNoteModalStep").routing_uid(),
+                "value": json.dumps(
+                    {
+                        "resolution_note_window_action": "edit",
+                        "alert_group_pk": self.alert_group.pk,
+                        "organization_id": self.alert_group.channel.organization_id,
+                    }
+                ),
+            }
+            if resolution_notes_count == 0:
+                resolution_notes_button["style"] = "primary"
+                resolution_notes_button["text"]["text"] = "Add Resolution notes"
+            buttons.append(resolution_notes_button)
+ else:
+ if not self.alert_group.resolved:
+ buttons.append(
+ {
+ "text": {"type": "plain_text", "text": "Resolve", "emoji": True},
+ "type": "button",
+ "style": "primary",
+ "value": json.dumps({"organization_id": self.alert_group.channel.organization_id}),
+ "action_id": ScenarioStep.get_step("distribute_alerts", "ResolveGroupStep").routing_uid(),
+ },
+ )
+ blocks = [{"type": "actions", "elements": buttons}]
+ return blocks
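+
+    # The resulting payload is a single Block Kit "actions" block, roughly (illustrative shape):
+    #   [{"type": "actions", "elements": [<Acknowledge/Resolve buttons>, <Silence select>, ...]}]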
+
+ def _get_invitation_attachment(self):
+ Invitation = apps.get_model("alerts", "Invitation")
+ invitations = Invitation.objects.filter(is_active=True, alert_group=self.alert_group).all()
+ if len(invitations) == 0:
+ return []
+ buttons = []
+ for invitation in invitations:
+ invitee_name = invitation.invitee.get_user_verbal_for_team_for_slack()
+ buttons.append(
+ {
+ "name": "{}_{}".format(
+ ScenarioStep.get_step("distribute_alerts", "StopInvitationProcess").routing_uid(), invitation.pk
+ ),
+ "text": "Stop inviting {}".format(invitee_name),
+ "type": "button",
+ "style": "primary",
+ "value": json.dumps({"organization_id": self.alert_group.channel.organization_id}),
+ },
+ )
+ return [
+ {
+ "fallback": "Invitations...",
+ "callback_id": "invitations",
+ "actions": buttons,
+ }
+ ]
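+
+    # Note: invitations still use Slack's legacy interactive attachments ("fallback"/"callback_id"/
+    # "actions"), unlike the Block Kit "blocks" used for the buttons above.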
diff --git a/engine/apps/alerts/incident_appearance/renderers/sms_renderer.py b/engine/apps/alerts/incident_appearance/renderers/sms_renderer.py
new file mode 100644
index 0000000000..f913eeb43b
--- /dev/null
+++ b/engine/apps/alerts/incident_appearance/renderers/sms_renderer.py
@@ -0,0 +1,32 @@
+from apps.alerts.incident_appearance.renderers.base_renderer import AlertBaseRenderer, AlertGroupBaseRenderer
+from apps.alerts.incident_appearance.renderers.constants import DEFAULT_BACKUP_TITLE
+from apps.alerts.incident_appearance.templaters import AlertSmsTemplater
+from common.utils import str_or_backup
+
+
+class AlertSmsRenderer(AlertBaseRenderer):
+ @property
+ def templater_class(self):
+ return AlertSmsTemplater
+
+
+class AlertGroupSmsRenderer(AlertGroupBaseRenderer):
+ @property
+ def alert_renderer_class(self):
+ return AlertSmsRenderer
+
+ def render(self):
+ templated_alert = self.alert_renderer.templated_alert
+ title = str_or_backup(templated_alert.title, DEFAULT_BACKUP_TITLE)
+ if self.alert_group.channel.organization.slack_team_identity and (permalink := self.alert_group.permalink):
+ incident_link = permalink
+ else:
+ incident_link = self.alert_group.web_link
+ return (
+ f"You are invited to check an incident #{self.alert_group.inside_organization_number} with title "
+ f'"{title}" in Grafana OnCall organization: "{self.alert_group.channel.organization.org_title}", '
+ f"alert channel: {self.alert_group.channel.short_name}, "
+ f"alerts registered: {self.alert_group.alerts.count()}, "
+ f"{incident_link}\n"
+ f"Your Grafana OnCall <3"
+ )
diff --git a/engine/apps/alerts/incident_appearance/renderers/telegram_renderer.py b/engine/apps/alerts/incident_appearance/renderers/telegram_renderer.py
new file mode 100644
index 0000000000..edc89dd44b
--- /dev/null
+++ b/engine/apps/alerts/incident_appearance/renderers/telegram_renderer.py
@@ -0,0 +1,64 @@
+from emoji import emojize
+
+from apps.alerts.incident_appearance.renderers.base_renderer import AlertBaseRenderer, AlertGroupBaseRenderer
+from apps.alerts.incident_appearance.renderers.constants import DEFAULT_BACKUP_TITLE
+from apps.alerts.incident_appearance.templaters import AlertTelegramTemplater
+from common.utils import str_or_backup
+
+
+class AlertTelegramRenderer(AlertBaseRenderer):
+ @property
+ def templater_class(self):
+ return AlertTelegramTemplater
+
+
+class AlertGroupTelegramRenderer(AlertGroupBaseRenderer):
+ def __init__(self, alert_group):
+ super().__init__(alert_group)
+
+ # render the last alert content as a Telegram message, so Telegram message is updated when a new alert comes
+ self.alert_renderer = self.alert_renderer_class(self.alert_group.alerts.last())
+
+ @property
+ def alert_renderer_class(self):
+ return AlertTelegramRenderer
+
+ def render(self):
+ templated_alert = self.alert_renderer.templated_alert
+ title = str_or_backup(templated_alert.title, DEFAULT_BACKUP_TITLE)
+ message = templated_alert.message
+ image_url = templated_alert.image_url
+
+ alerts_count = self.alert_group.alerts.count()
+ if alerts_count <= 10:
+ alerts_count_str = str(alerts_count)
+ else:
+ alert_count_rounded = (alerts_count // 10) * 10
+ alerts_count_str = f"{alert_count_rounded}+"
+
+ status_emoji = "🔴"
+ if self.alert_group.resolved:
+ status_emoji = "🟢"
+ elif self.alert_group.acknowledged:
+ status_emoji = "🟠"
+ elif self.alert_group.silenced:
+ status_emoji = "⚪️" # white circle
+
+ status_verbose = "Alerting"
+ if self.alert_group.resolved:
+ status_verbose = self.alert_group.get_resolve_text()
+ elif self.alert_group.acknowledged:
+ status_verbose = self.alert_group.get_acknowledge_text()
+
+ text = f"{status_emoji} #{self.alert_group.inside_organization_number}, {title}\n"
+ text += f"{status_verbose}, alerts: {alerts_count_str}\n"
+ text += f"Source: {self.alert_group.channel.short_name}\n"
+ text += f"{self.alert_group.web_link}"
+
+ if message:
+ text += f"\n\n{message}"
+
+ if image_url is not None:
+ text = f" " + text
+
+ return emojize(text, use_aliases=True)
diff --git a/engine/apps/alerts/incident_appearance/renderers/web_renderer.py b/engine/apps/alerts/incident_appearance/renderers/web_renderer.py
new file mode 100644
index 0000000000..f7eecbab49
--- /dev/null
+++ b/engine/apps/alerts/incident_appearance/renderers/web_renderer.py
@@ -0,0 +1,34 @@
+from apps.alerts.incident_appearance.renderers.base_renderer import AlertBaseRenderer, AlertGroupBaseRenderer
+from apps.alerts.incident_appearance.templaters import AlertWebTemplater
+from common.utils import str_or_backup
+
+
+class AlertWebRenderer(AlertBaseRenderer):
+ @property
+ def templater_class(self):
+ return AlertWebTemplater
+
+ def render(self):
+ templated_alert = self.templated_alert
+ rendered_alert = {
+ "title": str_or_backup(templated_alert.title, "Alert"),
+ "message": str_or_backup(templated_alert.message, ""),
+ "image_url": str_or_backup(templated_alert.image_url, None),
+ "source_link": str_or_backup(templated_alert.image_url, None),
+ }
+ return rendered_alert
+
+
+class AlertGroupWebRenderer(AlertGroupBaseRenderer):
+ def __init__(self, alert_group):
+ super().__init__(alert_group)
+
+ # use the last alert to render content
+ self.alert_renderer = self.alert_renderer_class(self.alert_group.alerts.last())
+
+ @property
+ def alert_renderer_class(self):
+ return AlertWebRenderer
+
+ def render(self):
+ return self.alert_renderer.render()
diff --git a/engine/apps/alerts/incident_appearance/templaters/__init__.py b/engine/apps/alerts/incident_appearance/templaters/__init__.py
new file mode 100644
index 0000000000..21d72121f8
--- /dev/null
+++ b/engine/apps/alerts/incident_appearance/templaters/__init__.py
@@ -0,0 +1,7 @@
+from .alert_templater import TemplateLoader # noqa: F401
+from .email_templater import AlertEmailTemplater # noqa: F401
+from .phone_call_templater import AlertPhoneCallTemplater # noqa: F401
+from .slack_templater import AlertSlackTemplater # noqa: F401
+from .sms_templater import AlertSmsTemplater # noqa: F401
+from .telegram_templater import AlertTelegramTemplater # noqa: F401
+from .web_templater import AlertWebTemplater # noqa: F401
diff --git a/engine/apps/alerts/incident_appearance/templaters/alert_templater.py b/engine/apps/alerts/incident_appearance/templaters/alert_templater.py
new file mode 100644
index 0000000000..052313c44c
--- /dev/null
+++ b/engine/apps/alerts/incident_appearance/templaters/alert_templater.py
@@ -0,0 +1,183 @@
+from abc import ABC, abstractmethod
+from dataclasses import dataclass
+
+from apps.base.messaging import get_messaging_backend_from_id
+from apps.slack.slack_formatter import SlackFormatter
+from common.jinja_templater import apply_jinja_template
+
+
+class TemplateLoader:
+ def get_attr_template(self, attr, alert_receive_channel, render_for=None):
+ """
+        Try to get an attr template.
+        First, look for a template for the given combination of notification channel and attr.
+        If that template is None, try the default template.
+        If no default template exists, return None.
+ """
+
+ attr_name_for_template = self._get_attr_name_for_template(attr, alert_receive_channel, render_for)
+ attr_template = getattr(alert_receive_channel, attr_name_for_template, None)
+ if attr_template is None and render_for is not None:
+ # check for additional messaging backend templates
+ attr_template = alert_receive_channel.get_template_attribute(render_for, attr)
+ return attr_template or self.get_default_attr_template(attr, alert_receive_channel, render_for)
+
+ def get_default_attr_template(self, attr, alert_receive_channel, render_for=None):
+ default_attr_template_dict = self._get_dict_of_default_templates(attr, alert_receive_channel, render_for)
+ default_attr_template = default_attr_template_dict.get(alert_receive_channel.integration)
+ if default_attr_template is None and render_for is not None:
+ # check for additional messaging backend templates
+ default_attr_template = alert_receive_channel.get_default_template_attribute(render_for, attr)
+ return default_attr_template
+
+ @staticmethod
+ def _get_attr_name_for_template(attr, alert_receive_channel, render_for):
+ """
+        Get the appropriate template attribute name for the alert receive channel.
+        First tries the renderer-specific attribute name, e.g. "slack_title_template".
+        If it doesn't exist, falls back to the common attribute name, e.g. "title_template".
+ """
+ if render_for is not None:
+ renderer_specific_attr_name = f"{render_for}_{attr}_template"
+ if hasattr(alert_receive_channel, renderer_specific_attr_name):
+ return renderer_specific_attr_name
+
+ return f"{attr}_template"
+
+ @staticmethod
+ def _get_dict_of_default_templates(attr, alert_receive_channel, render_for):
+ """
+        Get the dict of default templates for the alert receive channel.
+        First tries the renderer-specific attribute name, e.g. "INTEGRATION_TO_DEFAULT_SLACK_TITLE_TEMPLATE".
+        If it doesn't exist, falls back to the common attribute name, e.g. "INTEGRATION_TO_DEFAULT_TITLE_TEMPLATE".
+ """
+ if render_for is not None:
+ templates_dict_attr_name = f"INTEGRATION_TO_DEFAULT_{render_for.upper()}_{attr.upper()}_TEMPLATE"
+
+ if hasattr(alert_receive_channel, templates_dict_attr_name):
+ return getattr(alert_receive_channel, templates_dict_attr_name)
+
+ return getattr(alert_receive_channel, f"INTEGRATION_TO_DEFAULT_{attr.upper()}_TEMPLATE", {})
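+
+    # Illustrative lookup order for get_attr_template("title", channel, render_for="slack"):
+    #   1. channel.slack_title_template (or channel.title_template if the renderer-specific
+    #      attribute doesn't exist)
+    #   2. channel.get_template_attribute("slack", "title") - additional messaging backend template
+    #   3. INTEGRATION_TO_DEFAULT_SLACK_TITLE_TEMPLATE[channel.integration], then the backend default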
+
+
+@dataclass
+class TemplatedAlert:
+ title: str = None
+ message: str = None
+ image_url: str = None
+ source_link: str = None
+
+
+class AlertTemplater(ABC):
+ def __init__(self, alert):
+ self.alert = alert
+ self.slack_formatter = SlackFormatter(alert.group.channel.organization)
+ self.template_manager = TemplateLoader()
+ self.incident_id = self.alert.group.inside_organization_number
+ self.link = self.alert.group.web_link
+
+ def render(self):
+ """
+ Rendering pipeline:
+        1. preformatting - recursively traverse the alert's raw request data and apply _preformat to string nodes
+        2. applying templates - apply jinja templates to the alert's raw request data
+        3. postformatting - apply _postformat to the templated alert
+        :return: TemplatedAlert
+ """
+ if self._apply_preformatting():
+ data = self._preformat_request_data(self.alert.raw_request_data)
+ else:
+ data = self.alert.raw_request_data
+ templated_alert = self._apply_templates(data)
+ templated_alert = self._postformat(templated_alert)
+ return templated_alert
+
+ def _apply_preformatting(self):
+ """
+        By default, the templater doesn't modify raw request data.
+        Override this method in a concrete templater if preformatting is needed.
+ """
+ return False
+
+ def _preformat_request_data(self, request_data):
+ if isinstance(request_data, dict):
+ preformatted_data = {}
+ for key in request_data.keys():
+ preformatted_data[key] = self._preformat_request_data(request_data[key])
+ elif isinstance(request_data, list):
+ preformatted_data = []
+ for value in request_data:
+ preformatted_data.append(self._preformat_request_data(value))
+ elif isinstance(request_data, str):
+ preformatted_data = self._preformat(request_data)
+ else:
+ preformatted_data = request_data
+ return preformatted_data
+
+ def _preformat(self, data):
+ return data
+
+ def _postformat(self, templated_alert):
+ return templated_alert
+
+ def _apply_templates(self, data):
+ channel = self.alert.group.channel
+
+ # it's important that source_link comes before title,
+ # since source_link is used to compute title
+ attrs_to_render = ["source_link", "title", "message", "image_url"]
+
+ templated_alert = TemplatedAlert()
+
+ for attr in attrs_to_render:
+            # determine if attr needs rendering by the presence of an appropriate template
+            # for the given combination of notification channel and attr.
+
+ backend_id = self._render_for()
+ message_backend = None
+ if backend_id:
+ message_backend = get_messaging_backend_from_id(backend_id.upper())
+
+ need_rendering = (
+ hasattr(channel, f"{self._render_for()}_{attr}_template")
+ or hasattr(channel, f"{attr}_template")
+ or message_backend is not None
+ )
+
+ if need_rendering:
+ rendered_attr = self._render_attribute_with_template(
+ attr,
+ data,
+ channel,
+ templated_alert,
+ )
+ if rendered_attr == "None":
+ rendered_attr = None
+ setattr(templated_alert, attr, rendered_attr)
+
+ return templated_alert
+
+ def _render_attribute_with_template(self, attr, data, channel, templated_alert):
+ """
+        Get the attr template and apply it.
+        If the attr template is None or invalid, return None.
+ """
+ attr_template = self.template_manager.get_attr_template(attr, channel, self._render_for())
+ if attr_template is not None:
+ context = {
+ "grafana_oncall_incident_id": self.incident_id,
+ "grafana_oncall_link": self.link,
+ "integration_name": channel.verbal_name,
+ "source_link": templated_alert.source_link,
+ "amixr_incident_id": self.incident_id, # TODO: decide on variable names
+ "amixr_link": self.link, # TODO: decide on variable names
+ }
+ templated_attr, success = apply_jinja_template(attr_template, data, **context)
+ if success:
+ return templated_attr
+
+ return None
+
+ @abstractmethod
+ def _render_for(self):
+ raise NotImplementedError
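+
+
+# A minimal concrete templater only has to name its channel; the preformatting hooks, template
+# lookup, jinja rendering and postformatting all come from AlertTemplater.
+# Hedged sketch with a hypothetical "pager" channel, not one of the shipped templaters:
+#
+#     class AlertPagerTemplater(AlertTemplater):
+#         def _render_for(self):
+#             return "pager"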
diff --git a/engine/apps/alerts/incident_appearance/templaters/email_templater.py b/engine/apps/alerts/incident_appearance/templaters/email_templater.py
new file mode 100644
index 0000000000..48870848f6
--- /dev/null
+++ b/engine/apps/alerts/incident_appearance/templaters/email_templater.py
@@ -0,0 +1,18 @@
+from apps.alerts.incident_appearance.templaters.alert_templater import AlertTemplater
+
+
+class AlertEmailTemplater(AlertTemplater):
+ RENDER_FOR_EMAIL = "email"
+
+ def _render_for(self):
+ return self.RENDER_FOR_EMAIL
+
+ def _postformat(self, templated_alert):
+ templated_alert.title = self._slack_format_for_email(templated_alert.title)
+ templated_alert.message = self._slack_format_for_email(templated_alert.message)
+ return templated_alert
+
+ def _slack_format_for_email(self, data):
+ sf = self.slack_formatter
+ sf.hyperlink_mention_format = "{title} - {url}"
+ return sf.format(data)
diff --git a/engine/apps/alerts/incident_appearance/templaters/phone_call_templater.py b/engine/apps/alerts/incident_appearance/templaters/phone_call_templater.py
new file mode 100644
index 0000000000..6f9997d728
--- /dev/null
+++ b/engine/apps/alerts/incident_appearance/templaters/phone_call_templater.py
@@ -0,0 +1,31 @@
+from apps.alerts.incident_appearance.templaters.alert_templater import AlertTemplater
+from common.utils import clean_markup
+
+
+class AlertPhoneCallTemplater(AlertTemplater):
+ RENDER_FOR_PHONE_CALL = "phone_call"
+
+ def _render_for(self):
+ return self.RENDER_FOR_PHONE_CALL
+
+ def _postformat(self, templated_alert):
+ templated_alert.title = self._postformat_pipeline(templated_alert.title)
+ templated_alert.message = self._postformat_pipeline(templated_alert.message)
+ return templated_alert
+
+ def _postformat_pipeline(self, text):
+ return self._escape(clean_markup(self._slack_format_for_phone_call(text))) if text is not None else text
+
+ def _slack_format_for_phone_call(self, data):
+ sf = self.slack_formatter
+ sf.user_mention_format = "{}"
+ sf.channel_mention_format = "#{}"
+ sf.hyperlink_mention_format = "{title}"
+ return sf.format(data)
+
+ def _escape(self, data):
+ # https://www.twilio.com/docs/api/errors/12100
+ data = data.replace("&", "&")
+ data = data.replace(">", ">")
+ data = data.replace("<", "<")
+ return data
diff --git a/engine/apps/alerts/incident_appearance/templaters/slack_templater.py b/engine/apps/alerts/incident_appearance/templaters/slack_templater.py
new file mode 100644
index 0000000000..6c6e3efedf
--- /dev/null
+++ b/engine/apps/alerts/incident_appearance/templaters/slack_templater.py
@@ -0,0 +1,14 @@
+from apps.alerts.incident_appearance.templaters.alert_templater import AlertTemplater
+
+
+class AlertSlackTemplater(AlertTemplater):
+ RENDER_FOR_SLACK = "slack"
+
+ def _render_for(self):
+ return self.RENDER_FOR_SLACK
+
+ def _postformat(self, templated_alert):
+        # replace newline characters in the Slack title, because Slack markdown breaks on multiline titles
+ if templated_alert.title:
+ templated_alert.title = templated_alert.title.replace("\n", "").replace("\r", "")
+ return templated_alert
diff --git a/engine/apps/alerts/incident_appearance/templaters/sms_templater.py b/engine/apps/alerts/incident_appearance/templaters/sms_templater.py
new file mode 100644
index 0000000000..0c9126814f
--- /dev/null
+++ b/engine/apps/alerts/incident_appearance/templaters/sms_templater.py
@@ -0,0 +1,27 @@
+from apps.alerts.incident_appearance.templaters.alert_templater import AlertTemplater
+from common.utils import clean_markup
+
+
+class AlertSmsTemplater(AlertTemplater):
+ RENDER_FOR_SMS = "sms"
+
+ def _render_for(self):
+ return self.RENDER_FOR_SMS
+
+ def _clean_markup(self, data):
+ return clean_markup(data)
+
+ def _postformat(self, templated_alert):
+ templated_alert.title = self._postformat_pipeline(templated_alert.title)
+ templated_alert.message = self._postformat_pipeline(templated_alert.message)
+ return templated_alert
+
+ def _postformat_pipeline(self, text):
+ return clean_markup(self._slack_format_for_sms(text)) if text is not None else text
+
+ def _slack_format_for_sms(self, data):
+ sf = self.slack_formatter
+ sf.user_mention_format = "{}"
+ sf.channel_mention_format = "#{}"
+ sf.hyperlink_mention_format = "{title}"
+ return sf.format(data)
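+
+    # With these settings, Slack markup such as mentions and hyperlinks is flattened to plain
+    # text (e.g. "<https://example.com|runbook>" -> "runbook"), which suits the SMS length budget.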
diff --git a/engine/apps/alerts/incident_appearance/templaters/telegram_templater.py b/engine/apps/alerts/incident_appearance/templaters/telegram_templater.py
new file mode 100644
index 0000000000..3cbbab7b2d
--- /dev/null
+++ b/engine/apps/alerts/incident_appearance/templaters/telegram_templater.py
@@ -0,0 +1,29 @@
+from apps.alerts.incident_appearance.templaters.alert_templater import AlertTemplater
+from common.utils import escape_html
+
+
+class AlertTelegramTemplater(AlertTemplater):
+ RENDER_FOR_TELEGRAM = "telegram"
+
+ def _render_for(self):
+ return self.RENDER_FOR_TELEGRAM
+
+ def _preformat(self, data):
+ return escape_html(self._slack_format_for_telegram(data))
+
+ def _apply_preformatting(self):
+ return True
+
+ def _slack_format_for_telegram(self, data):
+ sf = self.slack_formatter
+ sf.channel_mention_format = "{}"
+ sf.user_mention_format = "{}"
+ sf.hyperlink_mention_format = "{title} - {url}"
+ return sf.format(data)
+
+ def _postformat(self, templated_alert):
+ if templated_alert.title:
+ templated_alert.title = self._slack_format_for_telegram(templated_alert.title)
+ if templated_alert.message:
+ templated_alert.message = self._slack_format_for_telegram(templated_alert.message)
+ return templated_alert
diff --git a/engine/apps/alerts/incident_appearance/templaters/web_templater.py b/engine/apps/alerts/incident_appearance/templaters/web_templater.py
new file mode 100644
index 0000000000..913e285783
--- /dev/null
+++ b/engine/apps/alerts/incident_appearance/templaters/web_templater.py
@@ -0,0 +1,36 @@
+import re
+
+from apps.alerts.incident_appearance.templaters.alert_templater import AlertTemplater
+from common.utils import convert_md_to_html, escape_html, url_re, urlize_with_respect_to_a
+
+
+class AlertWebTemplater(AlertTemplater):
+ RENDER_FOR_WEB = "web"
+
+ def _render_for(self):
+ return self.RENDER_FOR_WEB
+
+ def _postformat(self, templated_alert):
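+        # Links are swapped for placeholder tokens before markdown conversion and restored
+        # afterwards, so convert_md_to_html can't mangle URLs that contain markdown-significant
+        # characters (underscores, asterisks, etc.).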
+ link_substitution = {}
+ if templated_alert.title:
+ templated_alert.title = escape_html(self._slack_format_for_web(templated_alert.title))
+ if templated_alert.message:
+ message = escape_html(self._slack_format_for_web(templated_alert.message))
+ link_matches = re.findall(url_re, message)
+ for idx, link in enumerate(link_matches):
+ substitution = f"amixrsubstitutedlink{idx}"
+ link_substitution[substitution] = link
+ message = message.replace(link, substitution)
+ message = convert_md_to_html(message)
+ for substitution, original_link in link_substitution.items():
+ message = message.replace(substitution, original_link)
+ templated_alert.message = urlize_with_respect_to_a(message)
+ if templated_alert.image_url:
+ templated_alert.image_url = escape_html(templated_alert.image_url)
+
+ return templated_alert
+
+ def _slack_format_for_web(self, data):
+ sf = self.slack_formatter
+ sf.hyperlink_mention_format = "[title](url)"
+ return sf.format(data)
diff --git a/engine/apps/alerts/incident_log_builder/__init__.py b/engine/apps/alerts/incident_log_builder/__init__.py
new file mode 100644
index 0000000000..129a8c892e
--- /dev/null
+++ b/engine/apps/alerts/incident_log_builder/__init__.py
@@ -0,0 +1 @@
+from .incident_log_builder import IncidentLogBuilder # noqa: F401
diff --git a/engine/apps/alerts/incident_log_builder/incident_log_builder.py b/engine/apps/alerts/incident_log_builder/incident_log_builder.py
new file mode 100644
index 0000000000..c15825513d
--- /dev/null
+++ b/engine/apps/alerts/incident_log_builder/incident_log_builder.py
@@ -0,0 +1,682 @@
+from django.apps import apps
+from django.db.models import Q
+from django.utils import timezone
+
+from apps.base.messaging import get_messaging_backend_from_id
+from apps.schedules.ical_utils import list_users_to_notify_from_ical
+
+
+class IncidentLogBuilder:
+ def __init__(self, alert_group):
+ self.alert_group = alert_group
+
+ def get_log_records_list(self, with_resolution_notes=False):
+ """
+        Generates a combined list of AlertGroupLogRecord and UserNotificationPolicyLogRecord entries
+        :return: list of log records
+ """
+ all_log_records = list()
+ # get logs from AlertGroupLogRecord
+ alert_group_log_records = self._get_log_records_for_after_resolve_report()
+ all_log_records.extend(alert_group_log_records)
+
+ # get logs from UserNotificationPolicyLogRecord
+ user_notification_log_records = self._get_user_notification_log_records_for_log_report()
+ all_log_records.extend(user_notification_log_records)
+
+ if with_resolution_notes:
+ resolution_notes = self._get_resolution_notes()
+ all_log_records.extend(resolution_notes)
+ # sort logs by date
+ all_log_records_sorted = sorted(all_log_records, key=lambda log: log.created_at)
+ return all_log_records_sorted
+
+ def _get_log_records_for_after_resolve_report(self):
+ EscalationPolicy = apps.get_model("alerts", "EscalationPolicy")
+ AlertGroupLogRecord = apps.get_model("alerts", "AlertGroupLogRecord")
+
+ excluded_log_types = [
+ AlertGroupLogRecord.TYPE_ESCALATION_FINISHED,
+ AlertGroupLogRecord.TYPE_INVITATION_TRIGGERED,
+ AlertGroupLogRecord.TYPE_ACK_REMINDER_TRIGGERED,
+ AlertGroupLogRecord.TYPE_WIPED,
+ AlertGroupLogRecord.TYPE_DELETED,
+ ]
+ excluded_escalation_steps = [EscalationPolicy.STEP_WAIT, EscalationPolicy.STEP_FINAL_RESOLVE]
+ not_excluded_steps_with_author = [
+ EscalationPolicy.STEP_NOTIFY,
+ EscalationPolicy.STEP_NOTIFY_IMPORTANT,
+ EscalationPolicy.STEP_NOTIFY_USERS_QUEUE,
+ ]
+
+ # exclude logs that we don't want to see in after resolve report
+ # exclude logs with deleted root or dependent alert group
+ return (
+ self.alert_group.log_records.exclude(
+ Q(
+ Q(type=AlertGroupLogRecord.TYPE_ESCALATION_TRIGGERED)
+ & Q(author__isnull=False)
+ & Q(
+ # new logs with saved escalation step
+ Q(
+ Q(escalation_policy_step__isnull=False)
+ & ~Q(escalation_policy_step__in=not_excluded_steps_with_author)
+ )
+ |
+ # old logs
+ Q(
+ Q(escalation_policy_step__isnull=True, escalation_policy__step__isnull=False)
+ & ~Q(escalation_policy__step__in=not_excluded_steps_with_author)
+ )
+ )
+ )
+ | Q(type__in=excluded_log_types)
+ | Q(escalation_policy_step__in=excluded_escalation_steps)
+ | Q( # new logs with saved escalation step
+ escalation_policy_step__isnull=True, escalation_policy__step__in=excluded_escalation_steps
+ )
+ | Q( # old logs
+ Q(Q(type=AlertGroupLogRecord.TYPE_ATTACHED) | Q(type=AlertGroupLogRecord.TYPE_UNATTACHED))
+ & Q(Q(root_alert_group__isnull=True) & Q(dependent_alert_group__isnull=True))
+ )
+ )
+ .select_related("author")
+ .distinct()
+ .order_by("created_at")
+ )
+
+ def _get_user_notification_log_records_for_log_report(self):
+ UserNotificationPolicyLogRecord = apps.get_model("base", "UserNotificationPolicyLogRecord")
+ UserNotificationPolicy = apps.get_model("base", "UserNotificationPolicy")
+
+ # exclude user notification logs with step 'wait' or with status 'finished'
+ return (
+ self.alert_group.personal_log_records.exclude(
+ Q(type=UserNotificationPolicyLogRecord.TYPE_PERSONAL_NOTIFICATION_FINISHED)
+ | Q(
+ Q(type=UserNotificationPolicyLogRecord.TYPE_PERSONAL_NOTIFICATION_TRIGGERED)
+ & Q(notification_policy__step=UserNotificationPolicy.Step.WAIT)
+ )
+ )
+ .select_related("author")
+ .distinct()
+ .order_by("created_at")
+ )
+
+ def _get_resolution_notes(self):
+ return self.alert_group.resolution_notes.select_related("author", "resolution_note_slack_message").order_by(
+ "created_at"
+ )
+
+ def get_incident_escalation_plan(self, for_slack=False):
+ """
+        Generates the escalation plan as a dict with timedeltas as keys and lists of plan lines as values
+        :param for_slack: (bool) whether to add the user's Slack id to each plan line
+ :return:
+ """
+ incident_escalation_plan = dict()
+ incident_escalation_plan = self._add_invitation_plan(incident_escalation_plan, for_slack=for_slack)
+ if not self.alert_group.acknowledged and not self.alert_group.is_silenced_forever:
+ incident_escalation_plan = self._add_escalation_plan(incident_escalation_plan, for_slack=for_slack)
+ final_incident_escalation_plan = self._finalize_escalation_plan_dict(incident_escalation_plan)
+ return final_incident_escalation_plan
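+
+    # Illustrative result for a two-step chain (values are rendered plan lines):
+    #   {timedelta(0): ["call Alice by phone"], timedelta(seconds=300): ["send sms to Bob"]}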
+
+ def _add_escalation_plan(self, escalation_plan_dict, for_slack=False):
+ """
+ Returns plan for future escalations
+ :param escalation_plan_dict:
+ :param for_slack:
+ :return: {timedelta: [{"user_id": user.pk, "plan_lines": [#rendered escalation policy line, ]}, ..., ...], ...}
+ """
+ esc_timedelta = timezone.timedelta(seconds=0) # timedelta for next escalation step
+ now = timezone.now()
+
+        # if the escalation snapshot wasn't saved (e.g. the channel filter was deleted),
+        # we cannot generate an escalation plan
+ escalation_snapshot = self.alert_group.escalation_snapshot
+ if escalation_snapshot is None:
+ return escalation_plan_dict
+
+ if self.alert_group.silenced_until:
+ timedelta = self.alert_group.silenced_until - now
+ esc_timedelta += timedelta
+
+ # get starting point for escalation plan, we are not interested in previous escalation logs
+ stop_escalation_log = self.alert_group.last_stop_escalation_log
+
+ # set starting point to 0 if incident wasn't acknowledged or resolved
+ stop_escalation_log_pk = stop_escalation_log.pk if stop_escalation_log else 0
+
+ # render escalation plan from escalation_snapshot
+ escalation_plan_dict = self._render_escalation_plan_from_escalation_snapshot(
+ escalation_plan_dict,
+ stop_escalation_log_pk,
+ esc_timedelta,
+ escalation_snapshot,
+ for_slack,
+ )
+ return escalation_plan_dict
+
+ def _render_escalation_plan_from_escalation_snapshot(
+ self, escalation_plan_dict, stop_escalation_log_pk, esc_timedelta, escalation_snapshot, for_slack=False
+ ):
+ EscalationPolicy = apps.get_model("alerts", "EscalationPolicy")
+ now = timezone.now()
+ escalation_eta = None
+ last_log_timedelta = None
+ escalation_policies_snapshots = escalation_snapshot.escalation_policies_snapshots
+
+ # get escalation log of the last passed escalation step
+ last_escalation_log = (
+ self.alert_group.log_records.filter(
+ escalation_policy_step__isnull=False,
+ pk__gt=stop_escalation_log_pk,
+ )
+ .order_by("pk")
+ .last()
+ )
+ if last_escalation_log is not None:
+ escalation_eta = last_escalation_log.eta
+ last_log_timedelta = now - last_escalation_log.created_at
+
+ # get order of next escalation step
+ escalation_policy_order = escalation_snapshot.last_active_escalation_policy_order or 0
+ # do not exclude wait step, because we need it to count timedelta
+ if (
+ escalation_policies_snapshots
+ and escalation_policies_snapshots[escalation_policy_order].step != EscalationPolicy.STEP_WAIT
+ ):
+ escalation_policy_order += 1
+
+ if len(escalation_policies_snapshots) > 0 and not escalation_eta:
+ future_step_timedelta = None
+ for escalation_policy_snapshot in escalation_policies_snapshots:
+ step_timedelta = esc_timedelta
+ future_step = escalation_policy_snapshot.order >= escalation_policy_order # step not passed yet
+ if future_step and escalation_policy_snapshot.step == EscalationPolicy.STEP_WAIT:
+ wait_delay = escalation_policy_snapshot.wait_delay or EscalationPolicy.DEFAULT_WAIT_DELAY
+ esc_timedelta += wait_delay # increase timedelta for next steps
+ continue
+ # get relative timedelta for step
+ elif future_step and last_log_timedelta:
+ future_step_timedelta = esc_timedelta - last_log_timedelta
+ elif not future_step:
+ passed_last_time = escalation_policy_snapshot.passed_last_time
+ if passed_last_time is not None:
+ step_timedelta = esc_timedelta - (now - passed_last_time)
+ else:
+ step_timedelta = esc_timedelta
+
+ step_timedelta = future_step_timedelta or step_timedelta
+
+ # stop plan generation if there is resolve step in escalation plan
+ if future_step and escalation_policy_snapshot.step == EscalationPolicy.STEP_FINAL_RESOLVE:
+ escalation_plan_dict = IncidentLogBuilder._remove_future_plan(esc_timedelta, escalation_plan_dict)
+ escalation_step_plan_dict = self._render_escalation_step_plan_from_escalation_policy_snapshot(
+ escalation_policy_snapshot,
+ escalation_snapshot,
+ for_slack=for_slack,
+ future_step=future_step,
+ esc_timedelta=step_timedelta,
+ )
+ step_timedelta += timezone.timedelta(seconds=5) # make this step the last in plan
+
+ for timedelta, plan in escalation_step_plan_dict.items():
+ timedelta += step_timedelta
+ escalation_plan_dict.setdefault(timedelta, []).extend(plan)
+ break
+
+ # render escalation and notification plan lines for step
+ escalation_step_plan_dict = self._render_escalation_step_plan_from_escalation_policy_snapshot(
+ escalation_policy_snapshot,
+ escalation_snapshot,
+ for_slack=for_slack,
+ future_step=future_step,
+ esc_timedelta=step_timedelta,
+ )
+ escalation_plan_dict = self._correct_users_notification_plan(
+ escalation_plan_dict, escalation_step_plan_dict, step_timedelta
+ )
+ return escalation_plan_dict
+
+ @staticmethod
+ def _remove_future_plan(timedelta_to_remove, plan_dict):
+ """
+        Removes plan entries with a higher timedelta, i.e. events that would start later than
+        the selected time (timedelta_to_remove).
+ :param timedelta_to_remove:
+ :param plan_dict:
+ :return: new plan dict
+ """
+ new_plan_dict = dict()
+ for timedelta in sorted(plan_dict):
+ if timedelta <= timedelta_to_remove:
+ new_plan_dict[timedelta] = plan_dict[timedelta]
+ return new_plan_dict
+
+ def _add_invitation_plan(self, escalation_plan_dict, for_slack=False):
+ """
+        Adds notification plans for active invitations
+ :param escalation_plan_dict:
+ :param for_slack:
+ :return: {timedelta: [{"user_id": user.pk, "plan_lines": [#rendered escalation policy line, ]}, ..., ...], ...}
+ """
+ Invitation = apps.get_model("alerts", "Invitation")
+ now = timezone.now()
+ for invitation in self.alert_group.invitations.filter(is_active=True):
+ invitation_timedelta = timezone.timedelta()
+ current_attempt = invitation.attempt - 1
+ # generate notification plan for each attempt
+ for attempt in range(current_attempt, Invitation.ATTEMPTS_LIMIT + 1):
+ notification_plan = self._get_notification_plan_for_user(
+ invitation.invitee,
+ for_slack=for_slack,
+ future_step=attempt >= invitation.attempt,
+ )
+ escalation_plan_dict = self._correct_users_notification_plan(
+ escalation_plan_dict, notification_plan, invitation_timedelta
+ )
+ started_timedelta = now - invitation.created_at
+ invitation_timedelta += Invitation.get_delay_by_attempt(attempt) - started_timedelta
+ return escalation_plan_dict
+
+ def _correct_users_notification_plan(self, escalation_plan_dict, notification_plan_dict, esc_time):
+ """
+        Check if escalation_plan_dict has user notification events with a higher timedelta
+        than the timedelta of the current step. If it does, remove those future notification
+        events for users who will be re-notified by the current escalation step, because their
+        notification chain starts over from the beginning.
+
+ :param escalation_plan_dict:
+ :param notification_plan_dict:
+ :param esc_time:
+ :return:
+ """
+
+ future_step_timedelta = None
+
+ later_events_exist = False
+ for timedelta in escalation_plan_dict:
+ if timedelta > esc_time:
+ later_events_exist = True
+ break
+ if later_events_exist:
+ earliest_events = notification_plan_dict.get(timezone.timedelta(), [])
+ notification_plans_to_remove = []
+ for event_dict in earliest_events: # [{"user_id": user.pk, "plan_lines": []}, {"plan_lines": []}]
+ user_id = event_dict.get("user_id")
+ if user_id:
+ notification_plans_to_remove.append(user_id)
+ new_escalation_policies_dict = {}
+
+ for timedelta in sorted(escalation_plan_dict):
+ # do not add step from escalation plan if its timedelta < 0
+ if timedelta < timezone.timedelta():
+ continue
+ events_list = list()
+ for event_dict in escalation_plan_dict[timedelta]:
+ if event_dict.get("is_the_first_notification_step"):
+ if (
+ future_step_timedelta is None
+ and timedelta > esc_time
+ and event_dict.get("user_id") in notification_plans_to_remove
+ ):
+ future_step_timedelta = timedelta
+ if (
+ timedelta < esc_time
+ or event_dict.get("user_id") not in notification_plans_to_remove
+ or future_step_timedelta is not None
+ ):
+ events_list.append(event_dict)
+ if len(events_list) > 0:
+ new_escalation_policies_dict.setdefault(timedelta, []).extend(events_list)
+
+ escalation_plan_dict = new_escalation_policies_dict
+
+ for timedelta, plan in notification_plan_dict.items():
+ timedelta = esc_time + timedelta
+ if future_step_timedelta is None or future_step_timedelta > timedelta:
+ escalation_plan_dict.setdefault(timedelta, []).extend(plan)
+
+ return escalation_plan_dict
+
+ def _finalize_escalation_plan_dict(self, escalation_dict):
+ """
+ It changes escalation dict structure
+ from {timedelta: [{"user_id": user.pk, "plan_lines": []}, {"plan_lines": []}]}
+ to {timedelta: [all plan lines for this timedelta]}
+ :param escalation_dict:
+ :return:
+ """
+ final_escalation_dict = dict()
+ for timedelta in escalation_dict:
+ plan_lines_list = list()
+ for event_dict in escalation_dict[timedelta]:
+ plan_lines_list.extend(event_dict["plan_lines"])
+ if len(plan_lines_list) > 0:
+ timedelta = timedelta if timedelta > timezone.timedelta() else timezone.timedelta()
+ final_escalation_dict.setdefault(timedelta, []).extend(plan_lines_list)
+ return final_escalation_dict
+
+ def _render_escalation_step_plan_from_escalation_policy_snapshot(
+ self,
+ escalation_policy_snapshot,
+ escalation_snapshot,
+ for_slack=False,
+ future_step=False,
+ esc_timedelta=None,
+ ):
+ """
+ Renders escalation and notification policies plan dict.
+
+ :param escalation_policy_snapshot:
+ :param escalation_snapshot:
+        :param for_slack: (bool) whether to add the user's Slack id to the user notification plan line
+        :param future_step: (bool) whether the step has not been passed yet
+ :param esc_timedelta: timedelta of escalation step
+
+ :return: dict with timedelta as a key and list with escalation and notification plan lines as a value
+ """
+ EscalationPolicy = apps.get_model("alerts", "EscalationPolicy")
+
+ escalation_plan_dict = {}
+ timedelta = timezone.timedelta()
+ if escalation_policy_snapshot.step in [
+ EscalationPolicy.STEP_NOTIFY_MULTIPLE_USERS,
+ EscalationPolicy.STEP_NOTIFY_MULTIPLE_USERS_IMPORTANT,
+ EscalationPolicy.STEP_NOTIFY_USERS_QUEUE,
+ ]:
+ users_to_notify = escalation_policy_snapshot.sorted_users_queue
+ if future_step:
+ if users_to_notify:
+ plan_line = f'escalation step "{escalation_policy_snapshot.step_display}"'
+ if escalation_policy_snapshot.step == EscalationPolicy.STEP_NOTIFY_USERS_QUEUE:
+ try:
+ last_user_index = users_to_notify.index(escalation_policy_snapshot.last_notified_user)
+ except ValueError:
+ last_user_index = -1
+ user_to_notify = users_to_notify[(last_user_index + 1) % len(users_to_notify)]
+ users_to_notify = [user_to_notify]
+ else:
+ plan_line = (
+                    f'escalation step "{escalation_policy_snapshot.step_display}" with no recipients. Skipping'
+ )
+ plan = {"plan_lines": [plan_line]}
+ escalation_plan_dict.setdefault(timedelta, []).append(plan)
+ elif escalation_policy_snapshot.step == EscalationPolicy.STEP_NOTIFY_USERS_QUEUE:
+ last_notified_user = escalation_policy_snapshot.last_notified_user
+ users_to_notify = [last_notified_user] if last_notified_user else []
+
+ for user_to_notify in users_to_notify:
+ notification_plan_dict = self._get_notification_plan_for_user(
+ user_to_notify,
+ important=escalation_policy_snapshot.step == EscalationPolicy.STEP_NOTIFY_MULTIPLE_USERS_IMPORTANT,
+ for_slack=for_slack,
+ future_step=future_step,
+ )
+ # notification_plan_dict structure - {timedelta: [{"user_id": user.pk, "plan_lines": []}]
+ for timedelta, notification_plan in notification_plan_dict.items():
+ escalation_plan_dict.setdefault(timedelta, []).extend(notification_plan)
+
+ elif escalation_policy_snapshot.step == EscalationPolicy.STEP_FINAL_NOTIFYALL:
+ channel_id = escalation_snapshot.slack_channel_id
+ users_to_notify = []
+ if future_step:
+ if self.alert_group.is_presented_in_slack and channel_id:
+ plan_line = f'escalation step "{escalation_policy_snapshot.step_display}"'
+ slack_team_identity = self.alert_group.slack_message.slack_team_identity
+ users_to_notify = slack_team_identity.get_users_from_slack_conversation_for_organization(
+ channel_id=channel_id,
+ organization=self.alert_group.channel.organization,
+ )
+ else:
+ plan_line = (
+                    f'escalation step "{escalation_policy_snapshot.step_display}" is Slack-specific. Skipping'
+ )
+ plan = {"plan_lines": [plan_line]}
+ escalation_plan_dict.setdefault(timedelta, []).append(plan)
+ else:
+ users_to_notify = escalation_policy_snapshot.notify_to_users_queue
+
+ for user_to_notify in users_to_notify:
+ notification_plan_dict = self._get_notification_plan_for_user(
+ user_to_notify,
+ important=escalation_policy_snapshot.step == EscalationPolicy.STEP_NOTIFY_IMPORTANT,
+ for_slack=for_slack,
+ future_step=future_step,
+ )
+ # notification_plan_dict structure - {timedelta: [{"user_id": user.pk, "plan_lines": []}]
+ for timedelta, notification_plan in notification_plan_dict.items():
+ escalation_plan_dict.setdefault(timedelta, []).extend(notification_plan)
+
+ elif escalation_policy_snapshot.step == EscalationPolicy.STEP_FINAL_RESOLVE:
+ if future_step:
+ plan_line = "resolve automatically"
+ plan = {"plan_lines": [plan_line]}
+ escalation_plan_dict.setdefault(timedelta, []).append(plan)
+ elif escalation_policy_snapshot.step == EscalationPolicy.STEP_REPEAT_ESCALATION_N_TIMES:
+ if future_step:
+ escalation_counter = escalation_policy_snapshot.escalation_counter
+ repeat_times = EscalationPolicy.MAX_TIMES_REPEAT - escalation_counter
+ if repeat_times > 0:
+ plan_line = f"repeat escalation from the beginning ({repeat_times} times)"
+ else:
+ plan_line = 'skip step "Repeat Escalation"'
+ plan = {"plan_lines": [plan_line]}
+ escalation_plan_dict.setdefault(timedelta, []).append(plan)
+ elif escalation_policy_snapshot.step in [
+ EscalationPolicy.STEP_NOTIFY_GROUP,
+ EscalationPolicy.STEP_NOTIFY_GROUP_IMPORTANT,
+ ]:
+ users_to_notify = []
+ if future_step:
+ if self.alert_group.is_presented_in_slack:
+ user_group = escalation_policy_snapshot.notify_to_group
+ if user_group is not None:
+ users_to_notify = user_group.get_users_from_members_for_organization(
+ self.alert_group.channel.organization
+ )
+ user_group_handle = user_group.handle
+ important_text = ""
+ if escalation_policy_snapshot.step == EscalationPolicy.STEP_NOTIFY_GROUP_IMPORTANT:
+ important_text = " (Important)"
+ plan_line = f'escalation step "Notify @{user_group_handle} User Group{important_text}"'
+ else:
+ plan_line = (
+ f'escalation step "{escalation_policy_snapshot.step_display}" with no valid '
+ f"user group selected. Skipping"
+ )
+ else:
+ plan_line = (
+                    f'escalation step "{escalation_policy_snapshot.step_display}" is Slack-specific. Skipping'
+ )
+ plan = {"plan_lines": [plan_line]}
+ escalation_plan_dict.setdefault(timedelta, []).append(plan)
+ else:
+ users_to_notify = escalation_policy_snapshot.notify_to_users_queue
+
+ for user_to_notify in users_to_notify:
+ notification_plan_dict = self._get_notification_plan_for_user(
+ user_to_notify,
+ important=escalation_policy_snapshot.step == EscalationPolicy.STEP_NOTIFY_GROUP_IMPORTANT,
+ for_slack=for_slack,
+ future_step=future_step,
+ )
+ for timedelta, notification_plan in notification_plan_dict.items():
+ escalation_plan_dict.setdefault(timedelta, []).extend(notification_plan)
+ elif escalation_policy_snapshot.step in [
+ EscalationPolicy.STEP_NOTIFY_SCHEDULE,
+ EscalationPolicy.STEP_NOTIFY_SCHEDULE_IMPORTANT,
+ ]:
+ schedule = escalation_policy_snapshot.notify_schedule
+ users_oncall = []
+ if future_step:
+ if schedule is not None:
+ step_datetime = timezone.now() + esc_timedelta
+ users_oncall = list_users_to_notify_from_ical(schedule, step_datetime)
+ important_text = ""
+ if escalation_policy_snapshot.step == EscalationPolicy.STEP_NOTIFY_SCHEDULE_IMPORTANT:
+ important_text = " (Important)"
+ plan_line = f"escalation step \"Notify on-call from Schedule '{schedule.name}'{important_text}\""
+ if users_oncall is None:
+ plan_line += ", but iCal import was failed. Skipping"
+ elif len(users_oncall) == 0:
+ plan_line += ", but there are no users to notify for this schedule slot. Skipping"
+ else:
+ plan_line = (
+ f'escalation step "{escalation_policy_snapshot.step_display}", but schedule is '
+ f"unspecified. Skipping"
+ )
+ plan = {"plan_lines": [plan_line]}
+ escalation_plan_dict.setdefault(timedelta, []).append(plan)
+ else:
+ users_oncall = escalation_policy_snapshot.notify_to_users_queue
+
+ for user_to_notify in users_oncall:
+ notification_plan_dict = self._get_notification_plan_for_user(
+ user_to_notify,
+ for_slack=for_slack,
+ important=escalation_policy_snapshot.step == EscalationPolicy.STEP_NOTIFY_SCHEDULE_IMPORTANT,
+ future_step=future_step,
+ )
+ # notification_plan_dict structure - {timedelta: [{"user_id": user.pk, "plan_lines": []}]
+ for timedelta, notification_plan in notification_plan_dict.items():
+ escalation_plan_dict.setdefault(timedelta, []).extend(notification_plan)
+ elif escalation_policy_snapshot.step == EscalationPolicy.STEP_TRIGGER_CUSTOM_BUTTON:
+ if future_step:
+ custom_button = escalation_policy_snapshot.custom_button_trigger
+ if custom_button is not None:
+ plan_line = f"trigger outgoing webhook `{custom_button.name}`"
+ else:
+ plan_line = (
+ f'escalation step "{escalation_policy_snapshot.step_display}", '
+ f"but outgoing webhook is unspecified. Skipping"
+ )
+ plan = {"plan_lines": [plan_line]}
+ escalation_plan_dict.setdefault(timedelta, []).append(plan)
+ elif escalation_policy_snapshot.step == EscalationPolicy.STEP_NOTIFY_IF_TIME:
+ if future_step:
+ if escalation_policy_snapshot.from_time is not None and escalation_policy_snapshot.to_time is not None:
+ plan_line = 'escalation step "Continue escalation if time"'
+ else:
+ plan_line = 'escalation step "Continue escalation if time", but time is not configured. Skipping'
+ plan = {"plan_lines": [plan_line]}
+ escalation_plan_dict.setdefault(timedelta, []).append(plan)
+ elif escalation_policy_snapshot.step is None:
+ if future_step:
+ plan_line = "escalation step is unspecified. Skipping"
+ plan = {"plan_lines": [plan_line]}
+ escalation_plan_dict.setdefault(timedelta, []).append(plan)
+ return escalation_plan_dict
+
+ def _render_user_notification_line(self, user_to_notify, notification_policy, for_slack=False):
+ """
+ Renders user notification plan line
+ :param user_to_notify:
+ :param notification_policy:
+        :param for_slack: (bool) whether to add the user's Slack id to the user notification plan line
+ :return: plan line
+ """
+ UserNotificationPolicy = apps.get_model("base", "UserNotificationPolicy")
+ result = ""
+ user_verbal = user_to_notify.get_user_verbal_for_team_for_slack() if for_slack else user_to_notify.username
+ if notification_policy.step == UserNotificationPolicy.Step.NOTIFY:
+ if notification_policy.notify_by == UserNotificationPolicy.NotificationChannel.SLACK:
+ result += f"invite {user_verbal} in Slack"
+ elif notification_policy.notify_by == UserNotificationPolicy.NotificationChannel.SMS:
+ result += f"send sms to {user_verbal}"
+ elif notification_policy.notify_by == UserNotificationPolicy.NotificationChannel.PHONE_CALL:
+ result += f"call {user_verbal} by phone"
+ elif notification_policy.notify_by == UserNotificationPolicy.NotificationChannel.TELEGRAM:
+ result += f"send telegram message to {user_verbal}"
+ # TODO: restore email notifications
+ # elif notification_policy.notify_by == UserNotificationPolicy.NotificationChannel.EMAIL:
+ # result += f"send email to {user_verbal}"
+ else:
+ try:
+ backend_id = UserNotificationPolicy.NotificationChannel(notification_policy.notify_by).name
+ backend = get_messaging_backend_from_id(backend_id)
+ except ValueError:
+ pass
+ else:
+ result += f"send {backend.label.lower()} message to {user_verbal}"
+ if not result:
+ result += f"inviting {user_verbal} but notification channel is unspecified"
+ return result
+
+ def _get_notification_plan_for_user(self, user_to_notify, future_step=False, important=False, for_slack=False):
+ """
+ Renders user notification plan
+ :param user_to_notify:
+ :param future_step:
+ :param important:
+        :param for_slack: (bool) whether to add the user's Slack id to the user notification plan line
+ :return: {timedelta: [{"user_id": user.pk, "plan_lines": [#rendered notification policy line, ]}, ...], ...}
+ """
+ UserNotificationPolicyLogRecord = apps.get_model("base", "UserNotificationPolicyLogRecord")
+ UserNotificationPolicy = apps.get_model("base", "UserNotificationPolicy")
+
+ timedelta = timezone.timedelta()
+        is_the_first_notification_step = future_step  # whether escalation starts with this step
+
+ # generate starter dict for notification plan
+ plan_lines_dict = {
+ "user_id": user_to_notify.pk,
+ "plan_lines": [],
+ "is_the_first_notification_step": is_the_first_notification_step,
+ }
+ notification_plan_dict = {timedelta: [plan_lines_dict]}
+
+ last_user_log = None
+
+ notification_policy_order = 0
+        if not future_step:  # escalation step has been passed, so escalation for the user has already been triggered.
+ last_user_log = (
+ user_to_notify.personal_log_records.filter(
+ alert_group=self.alert_group,
+ notification_policy__isnull=False,
+ type__in=[
+ UserNotificationPolicyLogRecord.TYPE_PERSONAL_NOTIFICATION_TRIGGERED,
+ UserNotificationPolicyLogRecord.TYPE_PERSONAL_NOTIFICATION_FINISHED,
+ ],
+ )
+ .order_by("created_at")
+ .last()
+ )
+
+ if last_user_log and last_user_log.type == UserNotificationPolicyLogRecord.TYPE_PERSONAL_NOTIFICATION_TRIGGERED:
+ if last_user_log.notification_policy is not None:
+ notification_step = (
+ last_user_log.notification_step
+ if last_user_log.notification_step is not None
+ else last_user_log.notification_policy.step
+ )
+ # get order of the next notification step
+ if notification_step == UserNotificationPolicy.Step.WAIT:
+ # do not exclude wait step, because we need it to count timedelta
+ notification_policy_order = last_user_log.notification_policy.order
+ else:
+ # last passed step order + 1
+ notification_policy_order = last_user_log.notification_policy.order + 1
+
+ notification_policies = UserNotificationPolicy.objects.get_or_create_for_user(
+ user=user_to_notify, important=important
+ )
+
+ for notification_policy in notification_policies:
+ future_notification = notification_policy.order >= notification_policy_order
+ if notification_policy.step == UserNotificationPolicy.Step.WAIT:
+ wait_delay = notification_policy.wait_delay
+ if wait_delay is not None:
+ timedelta += wait_delay # increase timedelta for next steps
+ elif future_notification:
+ plan_line = self._render_user_notification_line(
+ user_to_notify, notification_policy, for_slack=for_slack
+ )
+ # add plan_line to user plan_lines list
+ if not notification_plan_dict.get(timedelta):
+ plan = {"user_id": user_to_notify.pk, "plan_lines": [plan_line]}
+ notification_plan_dict.setdefault(timedelta, []).append(plan)
+ else:
+ notification_plan_dict[timedelta][0]["plan_lines"].append(plan_line)
+ return notification_plan_dict
diff --git a/engine/apps/alerts/integration_options_mixin.py b/engine/apps/alerts/integration_options_mixin.py
new file mode 100644
index 0000000000..b5b00a417d
--- /dev/null
+++ b/engine/apps/alerts/integration_options_mixin.py
@@ -0,0 +1,80 @@
+import importlib
+
+from django.conf import settings
+
+from common.utils import getattrd
+
+
+class IntegrationOptionsMixin:
+ DEFAULT_INTEGRATION = "grafana"
+ # Import every integration config file listed in settings.INSTALLED_ONCALL_INTEGRATIONS
+    # as a submodule into a tuple, e.g. AlertReceiveChannel._config[0].id, slug, description, etc.
+ _config = tuple(
+ (importlib.import_module(integration_config) for integration_config in settings.INSTALLED_ONCALL_INTEGRATIONS)
+ )
+
+ def __init__(self, *args, **kwargs):
+ super(IntegrationOptionsMixin, self).__init__(*args, **kwargs)
+ # Object integration configs (imported as submodules earlier) are also available in `config` field,
+ # e.g. instance.config.id, instance.config.slug, instance.config.description, etc...
+ for integration in self._config:
+ if integration.slug == self.integration:
+ self.config = integration
+
+ # Define variables for backward compatibility, e.g. INTEGRATION_GRAFANA, INTEGRATION_FORMATTED_WEBHOOK, etc...
+ for integration_config in _config:
+ vars()[f"INTEGRATION_{integration_config.slug.upper()}"] = integration_config.slug
+
+ INTEGRATION_CHOICES = tuple(
+ (
+ (
+ integration_config.slug,
+ integration_config.title,
+ )
+ for integration_config in _config
+ )
+ )
+
+    # The following attributes are generated from _config for backwards compatibility and used across the codebase
+ INTEGRATIONS_TO_REVERSE_URL_MAP = {
+ integration_config.slug: integration_config.slug for integration_config in _config
+ }
+ WEB_INTEGRATION_CHOICES = [
+ integration_config.slug for integration_config in _config if integration_config.is_displayed_on_web
+ ]
+ INTEGRATIONS_TO_INSTRUCTIONS_WEB = {
+ integration_config.slug: f"html/integration_{integration_config.slug}.html" for integration_config in _config
+ }
+ INTEGRATION_SHORT_DESCRIPTION = {
+ integration_config.slug: integration_config.short_description for integration_config in _config
+ }
+ INTEGRATION_FEATURED = [integration_config.slug for integration_config in _config if integration_config.is_featured]
+
+    # The following attributes are dynamically generated and used by apps.alerts.incident_appearance renderers and templaters,
+ # e.g. INTEGRATION_TO_DEFAULT_SLACK_TITLE_TEMPLATE, INTEGRATION_TO_DEFAULT_SLACK_MESSAGE_TEMPLATE, etc...
+ template_names = [
+ "slack_title",
+ "slack_message",
+ "slack_image_url",
+ "web_title",
+ "web_message",
+ "web_image_url",
+ "email_title",
+ "email_message",
+ "sms_title",
+ "phone_call_title",
+ "telegram_title",
+ "telegram_message",
+ "telegram_image_url",
+ "grouping_id",
+ "resolve_condition",
+ "acknowledge_condition",
+ "group_verbose_name",
+ "source_link",
+ ]
+
+ for template_name in template_names:
+ result = dict()
+ for integration_config in _config:
+ result[integration_config.slug] = getattrd(integration_config, template_name, None)
+ vars()[f"INTEGRATION_TO_DEFAULT_{template_name.upper()}_TEMPLATE"] = result
diff --git a/engine/apps/alerts/migrations/0001_squashed_initial.py b/engine/apps/alerts/migrations/0001_squashed_initial.py
new file mode 100644
index 0000000000..40ec3b57eb
--- /dev/null
+++ b/engine/apps/alerts/migrations/0001_squashed_initial.py
@@ -0,0 +1,276 @@
+# Generated by Django 3.2.5 on 2022-05-31 14:46
+
+import apps.alerts.escalation_snapshot.escalation_snapshot_mixin
+import apps.alerts.integration_options_mixin
+import apps.alerts.models.alert
+import apps.alerts.models.alert_group
+import apps.alerts.models.alert_receive_channel
+import apps.alerts.models.channel_filter
+import apps.alerts.models.custom_button
+import apps.alerts.models.escalation_chain
+import apps.alerts.models.escalation_policy
+import apps.alerts.models.resolution_note
+import datetime
+import django.core.validators
+from django.db import migrations, models
+import django.db.models.deletion
+import django.db.models.manager
+
+
+class Migration(migrations.Migration):
+
+ initial = True
+
+ dependencies = [
+ ]
+
+ operations = [
+ migrations.CreateModel(
+ name='Alert',
+ fields=[
+ ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
+ ('public_primary_key', models.CharField(default=apps.alerts.models.alert.generate_public_primary_key_for_alert, max_length=20, unique=True, validators=[django.core.validators.MinLengthValidator(13)])),
+ ('is_resolve_signal', models.BooleanField(default=False)),
+ ('is_the_first_alert_in_group', models.BooleanField(default=False)),
+ ('message', models.TextField(default=None, max_length=3000, null=True)),
+ ('image_url', models.URLField(default=None, max_length=300, null=True)),
+ ('delivered', models.BooleanField(default=False)),
+ ('title', models.TextField(default=None, max_length=1500, null=True)),
+ ('created_at', models.DateTimeField(auto_now_add=True)),
+ ('link_to_upstream_details', models.URLField(default=None, max_length=500, null=True)),
+ ('integration_unique_data', models.JSONField(default=None, null=True)),
+ ('raw_request_data', models.JSONField()),
+ ('integration_optimization_hash', models.CharField(db_index=True, default=None, max_length=100, null=True)),
+ ],
+ ),
+ migrations.CreateModel(
+ name='AlertGroup',
+ fields=[
+ ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
+ ('public_primary_key', models.CharField(default=apps.alerts.models.alert_group.generate_public_primary_key_for_alert_group, max_length=20, unique=True, validators=[django.core.validators.MinLengthValidator(13)])),
+ ('distinction', models.CharField(db_index=True, default=None, max_length=100, null=True)),
+ ('verbose_name', models.TextField(default=None, null=True)),
+ ('inside_organization_number', models.IntegerField(default=0)),
+ ('resolved', models.BooleanField(default=False)),
+ ('resolved_by', models.IntegerField(choices=[(0, 'source'), (1, 'user'), (2, 'not yet'), (3, 'last escalation step'), (4, 'archived'), (5, 'wiped'), (6, 'stop maintenance')], default=2)),
+ ('resolved_at', models.DateTimeField(blank=True, null=True)),
+ ('acknowledged', models.BooleanField(default=False)),
+ ('acknowledged_on_source', models.BooleanField(default=False)),
+ ('acknowledged_at', models.DateTimeField(blank=True, null=True)),
+ ('acknowledged_by', models.IntegerField(choices=[(0, 'source'), (1, 'user'), (2, 'not yet'), (3, 'last escalation step'), (4, 'archived'), (5, 'wiped'), (6, 'stop maintenance')], default=2)),
+ ('acknowledged_by_confirmed', models.DateTimeField(default=None, null=True)),
+ ('is_escalation_finished', models.BooleanField(default=False)),
+ ('started_at', models.DateTimeField(auto_now_add=True)),
+ ('slack_message_sent', models.BooleanField(default=False)),
+ ('active_escalation_id', models.CharField(default=None, max_length=100, null=True)),
+ ('active_resolve_calculation_id', models.CharField(default=None, max_length=100, null=True)),
+ ('active_cache_for_web_calculation_id', models.CharField(default=None, max_length=100, null=True)),
+ ('silenced', models.BooleanField(default=False)),
+ ('silenced_at', models.DateTimeField(null=True)),
+ ('silenced_until', models.DateTimeField(blank=True, null=True)),
+ ('unsilence_task_uuid', models.CharField(default=None, max_length=100, null=True)),
+ ('reason_to_skip_escalation', models.IntegerField(choices=[(0, 'account_inactive'), (1, 'channel_archived'), (2, 'no_reason'), (3, 'rate_limited'), (4, 'channel_not_specified'), (5, 'restricted_action')], default=2)),
+ ('manual_severity', models.IntegerField(choices=[(0, 'high'), (1, 'low'), (2, 'none')], default=2)),
+ ('resolution_note_ts', models.CharField(default=None, max_length=100, null=True)),
+ ('cached_render_for_web', models.JSONField(default=dict)),
+ ('last_unique_unacknowledge_process_id', models.CharField(default=None, max_length=100, null=True)),
+ ('is_archived', models.BooleanField(default=False)),
+ ('wiped_at', models.DateTimeField(default=None, null=True)),
+ ('prevent_posting_alerts', models.BooleanField(default=False)),
+ ('maintenance_uuid', models.CharField(default=None, max_length=100, null=True, unique=True)),
+ ('raw_escalation_snapshot', models.JSONField(default=None, null=True)),
+ ('estimate_escalation_finish_time', models.DateTimeField(default=None, null=True)),
+ ('is_open_for_grouping', models.BooleanField(blank=True, default=None, null=True)),
+ ],
+ options={
+ 'get_latest_by': 'pk',
+ },
+ bases=(apps.alerts.models.alert_group.AlertGroupSlackRenderingMixin, apps.alerts.escalation_snapshot.escalation_snapshot_mixin.EscalationSnapshotMixin, models.Model),
+ managers=[
+ ('all_objects', django.db.models.manager.Manager()),
+ ],
+ ),
+ migrations.CreateModel(
+ name='AlertGroupCounter',
+ fields=[
+ ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
+ ('value', models.PositiveBigIntegerField(default=0)),
+ ],
+ ),
+ migrations.CreateModel(
+ name='AlertGroupLogRecord',
+ fields=[
+ ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
+ ('type', models.IntegerField(choices=[(0, 'Acknowledged'), (1, 'Unacknowledged'), (2, 'Invite'), (3, 'Stop invitation'), (4, 'Re-invite'), (5, 'Escalation triggered'), (6, 'Invitation triggered'), (16, 'Escalation finished'), (7, 'Silenced'), (15, 'Unsilenced'), (8, 'Attached'), (9, 'Unattached'), (10, 'Custom button triggered'), (11, 'Unacknowledged by timeout'), (12, 'Failed attachment'), (13, 'Incident resolved'), (14, 'Incident unresolved'), (17, 'Escalation failed'), (18, 'Acknowledge reminder triggered'), (19, 'Wiped'), (20, 'Deleted'), (21, 'Incident registered'), (22, 'A route is assigned to the incident')])),
+ ('created_at', models.DateTimeField(auto_now_add=True)),
+ ('reason', models.TextField(default=None, null=True)),
+ ('silence_delay', models.DurationField(default=None, null=True)),
+ ('eta', models.DateTimeField(default=None, null=True)),
+ ('escalation_error_code', models.PositiveIntegerField(default=None, null=True)),
+ ('escalation_policy_step', models.IntegerField(default=None, null=True)),
+ ('step_specific_info', models.JSONField(default=None, null=True)),
+ ],
+ ),
+ migrations.CreateModel(
+ name='AlertGroupPostmortem',
+ fields=[
+ ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
+ ('public_primary_key', models.CharField(default=apps.alerts.models.resolution_note.generate_public_primary_key_for_alert_group_postmortem, max_length=20, unique=True, validators=[django.core.validators.MinLengthValidator(13)])),
+ ('created_at', models.DateTimeField(auto_now_add=True)),
+ ('last_modified', models.DateTimeField(auto_now=True)),
+ ('text', models.TextField(default=None, max_length=3000, null=True)),
+ ],
+ ),
+ migrations.CreateModel(
+ name='AlertReceiveChannel',
+ fields=[
+ ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
+ ('maintenance_duration', models.DurationField(choices=[(datetime.timedelta(seconds=3600), '1 hour'), (datetime.timedelta(seconds=10800), '3 hours'), (datetime.timedelta(seconds=21600), '6 hours'), (datetime.timedelta(seconds=43200), '12 hours'), (datetime.timedelta(days=1), '24 hours')], default=None, null=True)),
+ ('maintenance_mode', models.IntegerField(choices=[(0, 'Debug'), (1, 'Maintenance')], default=None, null=True)),
+ ('maintenance_uuid', models.CharField(default=None, max_length=250, null=True, unique=True)),
+ ('maintenance_started_at', models.DateTimeField(default=None, null=True)),
+ ('public_primary_key', models.CharField(default=apps.alerts.models.alert_receive_channel.generate_public_primary_key_for_alert_receive_channel, max_length=20, unique=True, validators=[django.core.validators.MinLengthValidator(13)])),
+ ('created_at', models.DateTimeField(auto_now_add=True)),
+ ('deleted_at', models.DateTimeField(blank=True, null=True)),
+                ('integration', models.CharField(choices=[('alertmanager', 'AlertManager'), ('grafana', 'Grafana'), ('grafana_alerting', 'Grafana Alerting'), ('formatted_webhook', 'Formatted Webhook'), ('webhook', 'Webhook'), ('amazon_sns', 'Amazon SNS'), ('heartbeat', 'Heartbeat'), ('inbound_email', 'Inbound Email'), ('maintenance', 'Maintenance'), ('manual', 'Manual'), ('slack_channel', 'Slack Channel'), ('stackdriver', 'Stackdriver'), ('curler', 'Curler'), ('datadog', 'Datadog'), ('demo', 'Demo'), ('elastalert', 'Elastalert'), ('fabric', 'Fabric'), ('kapacitor', 'Kapacitor'), ('newrelic', 'New Relic'), ('pagerduty', 'Pagerduty'), ('pingdom', 'Pingdom'), ('prtg', 'PRTG'), ('sentry', 'Sentry'), ('uptimerobot', 'UptimeRobot'), ('zabbix', 'Zabbix')], default='grafana', max_length=100)),
+ ('allow_source_based_resolving', models.BooleanField(default=True)),
+ ('token', models.CharField(db_index=True, default=apps.alerts.models.alert_receive_channel.random_token_generator, max_length=30)),
+ ('smile_code', models.TextField(default=':slightly_smiling_face:')),
+ ('verbal_name', models.CharField(default=None, max_length=150, null=True)),
+ ('integration_slack_channel_id', models.CharField(default=None, max_length=150, null=True)),
+ ('is_finished_alerting_setup', models.BooleanField(default=False)),
+ ('slack_title_template', models.TextField(default=None, null=True)),
+ ('slack_message_template', models.TextField(default=None, null=True)),
+ ('slack_image_url_template', models.TextField(default=None, null=True)),
+ ('sms_title_template', models.TextField(default=None, null=True)),
+ ('phone_call_title_template', models.TextField(default=None, null=True)),
+ ('web_title_template', models.TextField(default=None, null=True)),
+ ('web_message_template', models.TextField(default=None, null=True)),
+ ('web_image_url_template', models.TextField(default=None, null=True)),
+ ('email_title_template', models.TextField(default=None, null=True)),
+ ('email_message_template', models.TextField(default=None, null=True)),
+ ('telegram_title_template', models.TextField(default=None, null=True)),
+ ('telegram_message_template', models.TextField(default=None, null=True)),
+ ('telegram_image_url_template', models.TextField(default=None, null=True)),
+ ('source_link_template', models.TextField(default=None, null=True)),
+ ('grouping_id_template', models.TextField(default=None, null=True)),
+ ('resolve_condition_template', models.TextField(default=None, null=True)),
+ ('acknowledge_condition_template', models.TextField(default=None, null=True)),
+ ('messaging_backends_templates', models.JSONField(default=None, null=True)),
+ ('rate_limited_in_slack_at', models.DateTimeField(default=None, null=True)),
+ ('rate_limit_message_task_id', models.CharField(default=None, max_length=100, null=True)),
+ ],
+ bases=(apps.alerts.integration_options_mixin.IntegrationOptionsMixin, models.Model),
+ ),
+ migrations.CreateModel(
+ name='ChannelFilter',
+ fields=[
+ ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
+ ('order', models.PositiveIntegerField(db_index=True, editable=False, verbose_name='order')),
+ ('public_primary_key', models.CharField(default=apps.alerts.models.channel_filter.generate_public_primary_key_for_channel_filter, max_length=20, unique=True, validators=[django.core.validators.MinLengthValidator(13)])),
+ ('notify_in_slack', models.BooleanField(default=True, null=True)),
+ ('notify_in_telegram', models.BooleanField(default=False, null=True)),
+ ('slack_channel_id', models.CharField(default=None, max_length=100, null=True)),
+ ('notification_backends', models.JSONField(default=None, null=True)),
+ ('created_at', models.DateTimeField(auto_now_add=True)),
+ ('filtering_term', models.CharField(default=None, max_length=1024, null=True)),
+ ('is_default', models.BooleanField(default=False)),
+ ],
+ options={
+ 'ordering': ('alert_receive_channel', 'is_default', 'order'),
+ },
+ ),
+ migrations.CreateModel(
+ name='CustomButton',
+ fields=[
+ ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
+ ('public_primary_key', models.CharField(default=apps.alerts.models.custom_button.generate_public_primary_key_for_custom_button, max_length=20, unique=True, validators=[django.core.validators.MinLengthValidator(13)])),
+ ('created_at', models.DateTimeField(auto_now_add=True)),
+ ('name', models.CharField(max_length=100)),
+ ('webhook', models.CharField(default=None, max_length=1000, null=True)),
+ ('data', models.TextField(default=None, null=True)),
+ ('user', models.CharField(default=None, max_length=100, null=True)),
+ ('password', models.CharField(default=None, max_length=100, null=True)),
+ ('deleted_at', models.DateTimeField(blank=True, null=True)),
+ ('authorization_header', models.CharField(default=None, max_length=1000, null=True)),
+ ('forward_whole_payload', models.BooleanField(default=False)),
+ ],
+ ),
+ migrations.CreateModel(
+ name='EscalationChain',
+ fields=[
+ ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
+ ('public_primary_key', models.CharField(default=apps.alerts.models.escalation_chain.generate_public_primary_key_for_escalation_chain, max_length=20, unique=True, validators=[django.core.validators.MinLengthValidator(13)])),
+ ('name', models.CharField(max_length=100)),
+ ],
+ ),
+ migrations.CreateModel(
+ name='EscalationPolicy',
+ fields=[
+ ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
+ ('order', models.PositiveIntegerField(db_index=True, editable=False, verbose_name='order')),
+ ('public_primary_key', models.CharField(default=apps.alerts.models.escalation_policy.generate_public_primary_key_for_escalation_policy, max_length=20, unique=True, validators=[django.core.validators.MinLengthValidator(13)])),
+ ('step', models.IntegerField(choices=[(0, 'Wait'), (1, 'Notify User'), (2, 'Notify Whole Channel'), (3, 'Repeat Escalation (5 times max)'), (4, 'Resolve'), (5, 'Notify Group'), (6, 'Notify Schedule'), (7, 'Notify User (Important)'), (8, 'Notify Group (Important)'), (9, 'Notify Schedule (Important)'), (10, 'Trigger Outgoing Webhook'), (11, 'Notify User (next each time)'), (12, 'Continue escalation only if time is from'), (13, 'Notify multiple Users'), (14, 'Notify multiple Users (Important)'), (15, 'Continue escalation if >X alerts per Y minutes')], default=None, null=True)),
+ ('wait_delay', models.DurationField(choices=[(datetime.timedelta(seconds=60), '1 min'), (datetime.timedelta(seconds=300), '5 min'), (datetime.timedelta(seconds=900), '15 min'), (datetime.timedelta(seconds=1800), '30 min'), (datetime.timedelta(seconds=3600), '60 min')], default=None, null=True)),
+ ('from_time', models.TimeField(default=None, null=True)),
+ ('to_time', models.TimeField(default=None, null=True)),
+ ('num_alerts_in_window', models.PositiveIntegerField(default=None, null=True)),
+ ('num_minutes_in_window', models.PositiveIntegerField(default=None, null=True)),
+ ],
+ options={
+ 'ordering': ('order',),
+ 'abstract': False,
+ },
+ ),
+ migrations.CreateModel(
+ name='GrafanaAlertingContactPoint',
+ fields=[
+ ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
+ ('uid', models.CharField(default=None, max_length=100, null=True)),
+ ('name', models.CharField(max_length=100)),
+ ('datasource_name', models.CharField(default='grafana', max_length=100)),
+ ('datasource_id', models.IntegerField(default=None, null=True)),
+ ],
+ ),
+ migrations.CreateModel(
+ name='Invitation',
+ fields=[
+ ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
+ ('created_at', models.DateTimeField(auto_now_add=True)),
+ ('is_active', models.BooleanField(default=True)),
+ ('attempt', models.IntegerField(default=0)),
+ ],
+ ),
+ migrations.CreateModel(
+ name='ResolutionNote',
+ fields=[
+ ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
+ ('public_primary_key', models.CharField(default=apps.alerts.models.resolution_note.generate_public_primary_key_for_resolution_note, max_length=20, unique=True, validators=[django.core.validators.MinLengthValidator(13)])),
+ ('source', models.IntegerField(choices=[(0, 'slack'), (1, 'web')], default=None, null=True)),
+ ('message_text', models.TextField(default=None, max_length=3000, null=True)),
+ ('created_at', models.DateTimeField(auto_now_add=True)),
+ ('deleted_at', models.DateTimeField(default=None, null=True)),
+ ],
+ ),
+ migrations.CreateModel(
+ name='ResolutionNoteSlackMessage',
+ fields=[
+ ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
+ ('text', models.TextField(default=None, max_length=3000, null=True)),
+ ('slack_channel_id', models.CharField(default=None, max_length=100, null=True)),
+ ('ts', models.CharField(default=None, max_length=100, null=True)),
+ ('thread_ts', models.CharField(default=None, max_length=100, null=True)),
+ ('permalink', models.CharField(default=None, max_length=250, null=True)),
+ ('added_to_resolution_note', models.BooleanField(default=False)),
+ ('posted_by_bot', models.BooleanField(default=False)),
+ ],
+ ),
+ migrations.CreateModel(
+ name='UserHasNotification',
+ fields=[
+ ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
+ ('active_notification_policy_id', models.CharField(default=None, max_length=100, null=True)),
+ ('alert_group', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='alerts.alertgroup')),
+ ],
+ ),
+ ]
diff --git a/engine/apps/alerts/migrations/0002_squashed_initial.py b/engine/apps/alerts/migrations/0002_squashed_initial.py
new file mode 100644
index 0000000000..bd0b33dc97
--- /dev/null
+++ b/engine/apps/alerts/migrations/0002_squashed_initial.py
@@ -0,0 +1,310 @@
+# Generated by Django 3.2.5 on 2022-05-31 14:46
+
+from django.db import migrations, models
+import django.db.models.deletion
+import django.db.models.manager
+
+
+class Migration(migrations.Migration):
+
+ initial = True
+
+ dependencies = [
+ ('user_management', '0001_squashed_initial'),
+ ('alerts', '0001_squashed_initial'),
+ ('slack', '0002_squashed_initial'),
+ ('telegram', '0001_squashed_initial'),
+ ('schedules', '0001_squashed_initial'),
+ ]
+
+ operations = [
+ migrations.AddField(
+ model_name='userhasnotification',
+ name='user',
+ field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='user_management.user'),
+ ),
+ migrations.AddField(
+ model_name='resolutionnoteslackmessage',
+ name='added_by_user',
+ field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='added_resolution_note_slack_messages', to='user_management.user'),
+ ),
+ migrations.AddField(
+ model_name='resolutionnoteslackmessage',
+ name='alert_group',
+ field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='resolution_note_slack_messages', to='alerts.alertgroup'),
+ ),
+ migrations.AddField(
+ model_name='resolutionnoteslackmessage',
+ name='user',
+ field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='authored_resolution_note_slack_messages', to='user_management.user'),
+ ),
+ migrations.AddField(
+ model_name='resolutionnote',
+ name='alert_group',
+ field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='resolution_notes', to='alerts.alertgroup'),
+ ),
+ migrations.AddField(
+ model_name='resolutionnote',
+ name='author',
+ field=models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='authored_resolution_notes', to='user_management.user'),
+ ),
+ migrations.AddField(
+ model_name='resolutionnote',
+ name='resolution_note_slack_message',
+ field=models.OneToOneField(default=None, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='resolution_note', to='alerts.resolutionnoteslackmessage'),
+ ),
+ migrations.AddField(
+ model_name='invitation',
+ name='alert_group',
+ field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='invitations', to='alerts.alertgroup'),
+ ),
+ migrations.AddField(
+ model_name='invitation',
+ name='author',
+ field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='author_of_invitations', to='user_management.user'),
+ ),
+ migrations.AddField(
+ model_name='invitation',
+ name='invitee',
+ field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='invitee_in_invitations', to='user_management.user'),
+ ),
+ migrations.AddField(
+ model_name='grafanaalertingcontactpoint',
+ name='alert_receive_channel',
+ field=models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='contact_points', to='alerts.alertreceivechannel'),
+ ),
+ migrations.AddField(
+ model_name='escalationpolicy',
+ name='custom_button_trigger',
+ field=models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='escalation_policies', to='alerts.custombutton'),
+ ),
+ migrations.AddField(
+ model_name='escalationpolicy',
+ name='escalation_chain',
+ field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='escalation_policies', to='alerts.escalationchain'),
+ ),
+ migrations.AddField(
+ model_name='escalationpolicy',
+ name='last_notified_user',
+ field=models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='last_notified_in_escalation_policies', to='user_management.user'),
+ ),
+ migrations.AddField(
+ model_name='escalationpolicy',
+ name='notify_schedule',
+ field=models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='escalation_policies', to='schedules.oncallschedule'),
+ ),
+ migrations.AddField(
+ model_name='escalationpolicy',
+ name='notify_to_group',
+ field=models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.SET_NULL, to='slack.slackusergroup'),
+ ),
+ migrations.AddField(
+ model_name='escalationpolicy',
+ name='notify_to_users_queue',
+ field=models.ManyToManyField(to='user_management.User'),
+ ),
+ migrations.AddField(
+ model_name='escalationchain',
+ name='organization',
+ field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='escalation_chains', to='user_management.organization'),
+ ),
+ migrations.AddField(
+ model_name='escalationchain',
+ name='team',
+ field=models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='escalation_chains', to='user_management.team'),
+ ),
+ migrations.AddField(
+ model_name='custombutton',
+ name='organization',
+ field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='custom_buttons', to='user_management.organization'),
+ ),
+ migrations.AddField(
+ model_name='custombutton',
+ name='team',
+ field=models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='custom_buttons', to='user_management.team'),
+ ),
+ migrations.AddField(
+ model_name='channelfilter',
+ name='alert_receive_channel',
+ field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='channel_filters', to='alerts.alertreceivechannel'),
+ ),
+ migrations.AddField(
+ model_name='channelfilter',
+ name='escalation_chain',
+ field=models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='channel_filters', to='alerts.escalationchain'),
+ ),
+ migrations.AddField(
+ model_name='channelfilter',
+ name='telegram_channel',
+ field=models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='channel_filter', to='telegram.telegramtoorganizationconnector'),
+ ),
+ migrations.AddField(
+ model_name='alertreceivechannel',
+ name='author',
+ field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='alert_receive_channels', to='user_management.user'),
+ ),
+ migrations.AddField(
+ model_name='alertreceivechannel',
+ name='maintenance_author',
+ field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='alertreceivechannel_maintenances_created', to='user_management.user'),
+ ),
+ migrations.AddField(
+ model_name='alertreceivechannel',
+ name='organization',
+ field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='alert_receive_channels', to='user_management.organization'),
+ ),
+ migrations.AddField(
+ model_name='alertreceivechannel',
+ name='team',
+ field=models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='alert_receive_channels', to='user_management.team'),
+ ),
+ migrations.AddField(
+ model_name='alertgrouppostmortem',
+ name='alert_group',
+ field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='postmortem_text', to='alerts.alertgroup'),
+ ),
+ migrations.AddField(
+ model_name='alertgrouplogrecord',
+ name='alert_group',
+ field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='log_records', to='alerts.alertgroup'),
+ ),
+ migrations.AddField(
+ model_name='alertgrouplogrecord',
+ name='author',
+ field=models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='log_records', to='user_management.user'),
+ ),
+ migrations.AddField(
+ model_name='alertgrouplogrecord',
+ name='custom_button',
+ field=models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.SET_DEFAULT, related_name='log_records', to='alerts.custombutton'),
+ ),
+ migrations.AddField(
+ model_name='alertgrouplogrecord',
+ name='dependent_alert_group',
+ field=models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='dependent_log_records', to='alerts.alertgroup'),
+ ),
+ migrations.AddField(
+ model_name='alertgrouplogrecord',
+ name='escalation_policy',
+ field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='log_records', to='alerts.escalationpolicy'),
+ ),
+ migrations.AddField(
+ model_name='alertgrouplogrecord',
+ name='invitation',
+ field=models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='log_records', to='alerts.invitation'),
+ ),
+ migrations.AddField(
+ model_name='alertgrouplogrecord',
+ name='root_alert_group',
+ field=models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='root_log_records', to='alerts.alertgroup'),
+ ),
+ migrations.AddField(
+ model_name='alertgroupcounter',
+ name='organization',
+ field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='user_management.organization'),
+ ),
+ migrations.AddField(
+ model_name='alertgroup',
+ name='acknowledged_by_user',
+ field=models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.SET_NULL, to='user_management.user'),
+ ),
+ migrations.AddField(
+ model_name='alertgroup',
+ name='channel',
+ field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='alert_groups', to='alerts.alertreceivechannel'),
+ ),
+ migrations.AddField(
+ model_name='alertgroup',
+ name='channel_filter',
+ field=models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.SET_DEFAULT, related_name='alert_groups', to='alerts.channelfilter'),
+ ),
+ migrations.AddField(
+ model_name='alertgroup',
+ name='resolved_by_alert',
+ field=models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='resolved_alert_groups', to='alerts.alert'),
+ ),
+ migrations.AddField(
+ model_name='alertgroup',
+ name='resolved_by_user',
+ field=models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='resolved_alert_groups', to='user_management.user'),
+ ),
+ migrations.AddField(
+ model_name='alertgroup',
+ name='root_alert_group',
+ field=models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='dependent_alert_groups', to='alerts.alertgroup'),
+ ),
+ migrations.AddField(
+ model_name='alertgroup',
+ name='silenced_by_user',
+ field=models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='silenced_alert_groups', to='user_management.user'),
+ ),
+ migrations.AddField(
+ model_name='alertgroup',
+ name='slack_log_message',
+ field=models.OneToOneField(default=None, null=True, on_delete=django.db.models.deletion.SET_NULL, to='slack.slackmessage'),
+ ),
+ migrations.AddField(
+ model_name='alertgroup',
+ name='slack_message',
+ field=models.OneToOneField(default=None, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='_alert_group', to='slack.slackmessage'),
+ ),
+ migrations.AddField(
+ model_name='alertgroup',
+ name='wiped_by',
+ field=models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='wiped_by_user', to='user_management.user'),
+ ),
+ migrations.AddField(
+ model_name='alert',
+ name='group',
+ field=models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='alerts', to='alerts.alertgroup'),
+ ),
+ migrations.CreateModel(
+ name='AlertForAlertManager',
+ fields=[
+ ],
+ options={
+ 'proxy': True,
+ 'indexes': [],
+ 'constraints': [],
+ },
+ bases=('alerts.alert',),
+ ),
+ migrations.CreateModel(
+ name='AlertGroupForAlertManager',
+ fields=[
+ ],
+ options={
+ 'proxy': True,
+ 'indexes': [],
+ 'constraints': [],
+ },
+ bases=('alerts.alertgroup',),
+ managers=[
+ ('all_objects', django.db.models.manager.Manager()),
+ ],
+ ),
+ migrations.AlterUniqueTogether(
+ name='userhasnotification',
+ unique_together={('user', 'alert_group')},
+ ),
+ migrations.AlterUniqueTogether(
+ name='resolutionnoteslackmessage',
+ unique_together={('thread_ts', 'ts')},
+ ),
+ migrations.AlterUniqueTogether(
+ name='escalationchain',
+ unique_together={('organization', 'name')},
+ ),
+ migrations.AlterUniqueTogether(
+ name='custombutton',
+ unique_together={('name', 'organization')},
+ ),
+ migrations.AddConstraint(
+ model_name='alertreceivechannel',
+ constraint=models.UniqueConstraint(fields=('organization', 'verbal_name', 'deleted_at'), name='unique integration name'),
+ ),
+ migrations.AlterUniqueTogether(
+ name='alertgroup',
+ unique_together={('channel_id', 'channel_filter_id', 'distinction', 'is_open_for_grouping')},
+ ),
+ ]
diff --git a/engine/apps/alerts/migrations/0003_squashed_create_demo_token_instances.py b/engine/apps/alerts/migrations/0003_squashed_create_demo_token_instances.py
new file mode 100644
index 0000000000..5729cbd68e
--- /dev/null
+++ b/engine/apps/alerts/migrations/0003_squashed_create_demo_token_instances.py
@@ -0,0 +1,178 @@
+# Generated by Django 3.2.5 on 2021-08-04 10:42
+
+import sys
+from django.db import migrations
+from django.utils import timezone, dateparse
+from apps.alerts.models.alert_receive_channel import number_to_smiles_translator
+from apps.public_api import constants as public_api_constants
+
+
+TYPE_SINGLE_EVENT = 0
+TYPE_RECURRENT_EVENT = 1
+FREQUENCY_WEEKLY = 1
+SOURCE_TERRAFORM = 3
+STEP_WAIT = 0
+STEP_NOTIFY_USERS_QUEUE = 12
+SOURCE_WEB = 1
+
+
+def create_demo_token_instances(apps, schema_editor):
+ if not (len(sys.argv) > 1 and sys.argv[1] == 'test'):
+ User = apps.get_model('user_management', 'User')
+ Organization = apps.get_model('user_management', 'Organization')
+ AlertReceiveChannel = apps.get_model('alerts', 'AlertReceiveChannel')
+ EscalationChain = apps.get_model('alerts', 'EscalationChain')
+ ChannelFilter = apps.get_model('alerts', 'ChannelFilter')
+ EscalationPolicy = apps.get_model('alerts', 'EscalationPolicy')
+ OnCallScheduleICal = apps.get_model('schedules', 'OnCallScheduleICal')
+ AlertGroup = apps.get_model('alerts', 'AlertGroup')
+ Alert = apps.get_model('alerts', 'Alert')
+ CustomButton = apps.get_model("alerts", "CustomButton")
+ CustomOnCallShift = apps.get_model('schedules', 'CustomOnCallShift')
+
+ organization = Organization.objects.get(public_primary_key=public_api_constants.DEMO_ORGANIZATION_ID)
+ user = User.objects.get(public_primary_key=public_api_constants.DEMO_USER_ID)
+
+ alert_receive_channel, _ = AlertReceiveChannel.objects.get_or_create(
+ public_primary_key=public_api_constants.DEMO_INTEGRATION_ID,
+ defaults=dict(
+ integration=0,
+ author=user,
+ organization=organization,
+ smile_code=number_to_smiles_translator(0)
+ )
+ )
+ escalation_chain, _ = EscalationChain.objects.get_or_create(
+ public_primary_key=public_api_constants.DEMO_ESCALATION_CHAIN_ID,
+ defaults=dict(
+ name="default",
+ organization=organization,
+ )
+ )
+
+ channel_filter_1, _ = ChannelFilter.objects.get_or_create(
+ public_primary_key=public_api_constants.DEMO_ROUTE_ID_1,
+ defaults=dict(
+ alert_receive_channel=alert_receive_channel,
+ slack_channel_id=public_api_constants.DEMO_SLACK_CHANNEL_FOR_ROUTE_ID,
+ filtering_term='us-(east|west)',
+ order=0,
+ escalation_chain=escalation_chain,
+ )
+ )
+ ChannelFilter.objects.get_or_create(
+ public_primary_key=public_api_constants.DEMO_ROUTE_ID_2,
+ defaults=dict(
+ alert_receive_channel=alert_receive_channel,
+ slack_channel_id=public_api_constants.DEMO_SLACK_CHANNEL_FOR_ROUTE_ID,
+ filtering_term='.*',
+ order=1,
+ is_default=True,
+ escalation_chain=escalation_chain,
+ )
+ )
+
+ EscalationPolicy.objects.get_or_create(
+ public_primary_key=public_api_constants.DEMO_ESCALATION_POLICY_ID_1,
+ defaults=dict(
+ step=STEP_WAIT,
+ wait_delay=timezone.timedelta(minutes=1),
+ order=0,
+ escalation_chain=escalation_chain,
+ )
+ )
+
+ escalation_policy_1, _ = EscalationPolicy.objects.get_or_create(
+ public_primary_key=public_api_constants.DEMO_ESCALATION_POLICY_ID_2,
+ defaults=dict(
+ step=STEP_NOTIFY_USERS_QUEUE,
+ order=1,
+ escalation_chain=escalation_chain,
+ )
+ )
+ escalation_policy_1.notify_to_users_queue.add(user)
+
+ schedule, _ = OnCallScheduleICal.objects.get_or_create(
+ public_primary_key=public_api_constants.DEMO_SCHEDULE_ID_ICAL,
+ defaults=dict(
+ organization=organization,
+ name=public_api_constants.DEMO_SCHEDULE_NAME_ICAL,
+ ical_url_overrides=public_api_constants.DEMO_SCHEDULE_ICAL_URL_OVERRIDES,
+ channel=public_api_constants.DEMO_SLACK_CHANNEL_SLACK_ID,
+ )
+ )
+
+ alert_group, _ = AlertGroup.all_objects.get_or_create(
+ public_primary_key=public_api_constants.DEMO_INCIDENT_ID,
+ defaults=dict(
+ channel=alert_receive_channel,
+ channel_filter=channel_filter_1,
+ resolved=True,
+ resolved_at=dateparse.parse_datetime(public_api_constants.DEMO_INCIDENT_RESOLVED_AT),
+ )
+ )
+ alert_group.started_at = dateparse.parse_datetime(public_api_constants.DEMO_INCIDENT_CREATED_AT)
+ alert_group.save(update_fields=['started_at'])
+
+    for alert_id, created_at in public_api_constants.DEMO_ALERT_IDS:
+        alert, _ = Alert.objects.get_or_create(
+            public_primary_key=alert_id,
+ defaults=dict(
+ group=alert_group,
+ raw_request_data=public_api_constants.DEMO_ALERT_PAYLOAD,
+ title='Memory above 90% threshold',
+ )
+ )
+ alert.created_at = dateparse.parse_datetime(created_at)
+ alert.save(update_fields=['created_at'])
+
+ CustomButton.objects.get_or_create(
+ public_primary_key=public_api_constants.DEMO_CUSTOM_ACTION_ID,
+ defaults=dict(
+ name=public_api_constants.DEMO_CUSTOM_ACTION_NAME,
+ organization=organization,
+ )
+ )
+
+ on_call_shift_1, _ = CustomOnCallShift.objects.get_or_create(
+ public_primary_key=public_api_constants.DEMO_ON_CALL_SHIFT_ID_1,
+ defaults=dict(
+ type=TYPE_SINGLE_EVENT,
+ organization=organization,
+ name=public_api_constants.DEMO_ON_CALL_SHIFT_NAME_1,
+ start=dateparse.parse_datetime(public_api_constants.DEMO_ON_CALL_SHIFT_START_1),
+ duration=timezone.timedelta(seconds=public_api_constants.DEMO_ON_CALL_SHIFT_DURATION),
+ )
+ )
+
+ on_call_shift_1.users.add(user)
+
+ on_call_shift_2, _ = CustomOnCallShift.objects.get_or_create(
+ public_primary_key=public_api_constants.DEMO_ON_CALL_SHIFT_ID_2,
+ defaults=dict(
+ type=TYPE_RECURRENT_EVENT,
+ organization=organization,
+ name=public_api_constants.DEMO_ON_CALL_SHIFT_NAME_2,
+ start=dateparse.parse_datetime(public_api_constants.DEMO_ON_CALL_SHIFT_START_2),
+ duration=timezone.timedelta(seconds=public_api_constants.DEMO_ON_CALL_SHIFT_DURATION),
+ frequency=FREQUENCY_WEEKLY,
+ interval=2,
+ by_day=public_api_constants.DEMO_ON_CALL_SHIFT_BY_DAY,
+ source=SOURCE_TERRAFORM,
+ )
+ )
+
+ on_call_shift_2.users.add(user)
+
+
+class Migration(migrations.Migration):
+
+ dependencies = [
+ ('alerts', '0002_squashed_initial'),
+ ('user_management', '0002_squashed_create_demo_token_instances'),
+ ('schedules', '0002_squashed_initial'),
+ ]
+
+ operations = [
+ migrations.RunPython(create_demo_token_instances, migrations.RunPython.noop)
+ ]
diff --git a/engine/apps/alerts/migrations/__init__.py b/engine/apps/alerts/migrations/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/engine/apps/alerts/models/__init__.py b/engine/apps/alerts/models/__init__.py
new file mode 100644
index 0000000000..86d811c910
--- /dev/null
+++ b/engine/apps/alerts/models/__init__.py
@@ -0,0 +1,15 @@
+from .alert import Alert, listen_for_alert_model_save # noqa: F401
+from .alert_group import AlertGroup # noqa: F401
+from .alert_group_counter import AlertGroupCounter # noqa: F401
+from .alert_group_log_record import AlertGroupLogRecord, listen_for_alertgrouplogrecord # noqa: F401
+from .alert_manager_models import AlertForAlertManager, AlertGroupForAlertManager # noqa: F401
+from .alert_receive_channel import AlertReceiveChannel, listen_for_alertreceivechannel_model_save # noqa: F401
+from .channel_filter import ChannelFilter # noqa: F401
+from .custom_button import CustomButton # noqa: F401
+from .escalation_chain import EscalationChain # noqa: F401
+from .escalation_policy import EscalationPolicy # noqa: F401
+from .grafana_alerting_contact_point import GrafanaAlertingContactPoint # noqa: F401
+from .invitation import Invitation # noqa: F401
+from .maintainable_object import MaintainableObject # noqa: F401
+from .resolution_note import ResolutionNote, ResolutionNoteSlackMessage # noqa: F401
+from .user_has_notification import UserHasNotification # noqa: F401
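These re-exports (the noqa: F401 pragmas silence unused-import warnings) let the rest of the codebase import every model from the package root rather than from individual modules, e.g.:

    from apps.alerts.models import Alert, AlertGroup, EscalationPolicy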
diff --git a/engine/apps/alerts/models/alert.py b/engine/apps/alerts/models/alert.py
new file mode 100644
index 0000000000..3e08e7b911
--- /dev/null
+++ b/engine/apps/alerts/models/alert.py
@@ -0,0 +1,273 @@
+import hashlib
+import logging
+from uuid import uuid4
+
+from django.apps import apps
+from django.conf import settings
+from django.core.validators import MinLengthValidator
+from django.db import models, transaction
+from django.db.models import JSONField
+from django.db.models.signals import post_save
+
+from apps.alerts.constants import TASK_DELAY_SECONDS
+from apps.alerts.incident_appearance.templaters import TemplateLoader
+from apps.alerts.tasks import distribute_alert, send_alert_group_signal
+from common.jinja_templater import apply_jinja_template
+from common.public_primary_keys import generate_public_primary_key, increase_public_primary_key_length
+
+logger = logging.getLogger(__name__)
+logger.setLevel(logging.DEBUG)
+
+
+def generate_public_primary_key_for_alert():
+ prefix = "A"
+ new_public_primary_key = generate_public_primary_key(prefix)
+
+ failure_counter = 0
+ while Alert.objects.filter(public_primary_key=new_public_primary_key).exists():
+ new_public_primary_key = increase_public_primary_key_length(
+ failure_counter=failure_counter, prefix=prefix, model_name="Alert"
+ )
+ failure_counter += 1
+
+ return new_public_primary_key
+
+
+class Alert(models.Model):
+ public_primary_key = models.CharField(
+ max_length=20,
+ validators=[MinLengthValidator(settings.PUBLIC_PRIMARY_KEY_MIN_LENGTH + 1)],
+ unique=True,
+ default=generate_public_primary_key_for_alert,
+ )
+
+ is_resolve_signal = models.BooleanField(default=False)
+ is_the_first_alert_in_group = models.BooleanField(default=False)
+ message = models.TextField(max_length=3000, default=None, null=True)
+ image_url = models.URLField(default=None, null=True, max_length=300)
+ delivered = models.BooleanField(default=False)
+ title = models.TextField(max_length=1500, default=None, null=True)
+
+ created_at = models.DateTimeField(auto_now_add=True)
+ link_to_upstream_details = models.URLField(max_length=500, default=None, null=True)
+ integration_unique_data = JSONField(default=None, null=True)
+ raw_request_data = JSONField()
+
+ # This hash is for integration-specific needs
+ integration_optimization_hash = models.CharField(max_length=100, db_index=True, default=None, null=True)
+
+ group = models.ForeignKey(
+ "alerts.AlertGroup", on_delete=models.CASCADE, null=True, default=None, related_name="alerts"
+ )
+
+ def get_integration_optimization_hash(self):
+ """
+ Should be overloaded in child classes.
+ """
+ raise NotImplementedError
+
+ @classmethod
+ def create(
+ cls,
+ title,
+ message,
+ image_url,
+ link_to_upstream_details,
+ alert_receive_channel,
+ integration_unique_data,
+ raw_request_data,
+ enable_autoresolve=True,
+ is_demo=False,
+ force_route_id=None,
+ ):
+ ChannelFilter = apps.get_model("alerts", "ChannelFilter")
+ AlertGroup = apps.get_model("alerts", "AlertGroup")
+ AlertReceiveChannel = apps.get_model("alerts", "AlertReceiveChannel")
+ AlertGroupLogRecord = apps.get_model("alerts", "AlertGroupLogRecord")
+
+ group_data = Alert.render_group_data(alert_receive_channel, raw_request_data, is_demo)
+ channel_filter = ChannelFilter.select_filter(
+ alert_receive_channel, raw_request_data, title, message, force_route_id
+ )
+
+ group, group_created = AlertGroup.all_objects.get_or_create_grouping(
+ channel=alert_receive_channel,
+ channel_filter=channel_filter,
+ group_data=group_data,
+ )
+
+ if group_created:
+ group.log_records.create(type=AlertGroupLogRecord.TYPE_REGISTERED)
+ group.log_records.create(type=AlertGroupLogRecord.TYPE_ROUTE_ASSIGNED)
+
+ mark_as_resolved = (
+ enable_autoresolve and group_data.is_resolve_signal and alert_receive_channel.allow_source_based_resolving
+ )
+ if not group.resolved and mark_as_resolved:
+ group.resolve_by_source()
+
+ mark_as_acknowledged = group_data.is_acknowledge_signal
+ if not group.acknowledged and mark_as_acknowledged:
+ group.acknowledge_by_source()
+
+ alert = cls(
+ is_resolve_signal=group_data.is_resolve_signal,
+ title=title,
+ message=message,
+ image_url=image_url,
+ link_to_upstream_details=link_to_upstream_details,
+ group=group,
+ integration_unique_data=integration_unique_data,
+ raw_request_data=raw_request_data,
+ is_the_first_alert_in_group=group_created,
+ )
+
+ alert.save()
+
+ maintenance_uuid = None
+ if alert_receive_channel.organization.maintenance_mode == AlertReceiveChannel.MAINTENANCE:
+ maintenance_uuid = alert_receive_channel.organization.maintenance_uuid
+
+ elif alert_receive_channel.maintenance_mode == AlertReceiveChannel.MAINTENANCE:
+ maintenance_uuid = alert_receive_channel.maintenance_uuid
+
+ if maintenance_uuid is not None:
+ try:
+ maintenance_incident = AlertGroup.all_objects.get(maintenance_uuid=maintenance_uuid)
+ group.root_alert_group = maintenance_incident
+ group.save(update_fields=["root_alert_group"])
+ log_record_for_root_incident = maintenance_incident.log_records.create(
+ type=AlertGroupLogRecord.TYPE_ATTACHED, dependent_alert_group=group, reason="Attach dropdown"
+ )
+ logger.debug(
+ f"call send_alert_group_signal for alert_group {maintenance_incident.pk} (maintenance), "
+ f"log record {log_record_for_root_incident.pk} with type "
+ f"'{log_record_for_root_incident.get_type_display()}'"
+ )
+ send_alert_group_signal.apply_async((log_record_for_root_incident.pk,))
+ except AlertGroup.DoesNotExist:
+ pass
+
+ return alert
+
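A minimal usage sketch of create() (the channel and payload below are hypothetical; in practice this classmethod is called from the alert-ingestion code paths):

    alert = Alert.create(
        title="Memory above 90% threshold",
        message="host-1 memory usage is at 93%",       # hypothetical
        image_url=None,
        link_to_upstream_details=None,
        alert_receive_channel=alert_receive_channel,   # an AlertReceiveChannel instance
        integration_unique_data=None,
        raw_request_data={"status": "firing"},         # hypothetical payload
    )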
+ def wipe(self, wiped_by, wiped_at):
+ wiped_by_user_verbal = "by " + wiped_by.username
+
+ self.integration_unique_data = {}
+ self.raw_request_data = {}
+ self.title = f"Wiped {wiped_by_user_verbal} at {wiped_at.strftime('%Y-%m-%d')}"
+ self.message = ""
+ self.image_url = None
+ self.link_to_upstream_details = None
+ self.save(
+ update_fields=[
+ "integration_unique_data",
+ "raw_request_data",
+ "title",
+ "message",
+ "image_url",
+ "link_to_upstream_details",
+ ]
+ )
+
+ @classmethod
+ def render_group_data(cls, alert_receive_channel, raw_request_data, is_demo=False):
+ AlertGroup = apps.get_model("alerts", "AlertGroup")
+
+ template_manager = TemplateLoader()
+
+ is_resolve_signal = False
+ is_acknowledge_signal = False
+ group_distinction = None
+ group_verbose_name = "Incident"
+
+ acknowledge_condition_template = template_manager.get_attr_template(
+ "acknowledge_condition", alert_receive_channel
+ )
+ resolve_condition_template = template_manager.get_attr_template("resolve_condition", alert_receive_channel)
+ grouping_id_template = template_manager.get_attr_template("grouping_id", alert_receive_channel)
+        # use get_default_attr_template because group_verbose_name cannot be customized; only its default value exists
+ group_verbose_name_template = template_manager.get_default_attr_template(
+ "group_verbose_name", alert_receive_channel
+ )
+ if group_verbose_name_template is not None:
+ group_verbose_name, _ = apply_jinja_template(group_verbose_name_template, raw_request_data)
+
+ if grouping_id_template is not None:
+ group_distinction, _ = apply_jinja_template(grouping_id_template, raw_request_data)
+
+ # Insert demo uuid to prevent grouping of demo alerts.
+ if is_demo:
+ group_distinction = cls.insert_demo_uuid(group_distinction)
+
+ if group_distinction is not None:
+ group_distinction = hashlib.md5(str(group_distinction).encode()).hexdigest()
+
+ if resolve_condition_template is not None:
+ is_resolve_signal, _ = apply_jinja_template(resolve_condition_template, payload=raw_request_data)
+ if isinstance(is_resolve_signal, str):
+ is_resolve_signal = is_resolve_signal.strip().lower() in ["1", "true", "ok"]
+ else:
+ is_resolve_signal = False
+ if acknowledge_condition_template is not None:
+ is_acknowledge_signal, _ = apply_jinja_template(acknowledge_condition_template, payload=raw_request_data)
+ if isinstance(is_acknowledge_signal, str):
+ is_acknowledge_signal = is_acknowledge_signal.strip().lower() in ["1", "true", "ok"]
+ else:
+ is_acknowledge_signal = False
+
+ return AlertGroup.GroupData(
+ is_resolve_signal=is_resolve_signal,
+ is_acknowledge_signal=is_acknowledge_signal,
+ group_distinction=group_distinction,
+ group_verbose_name=group_verbose_name,
+ )
+
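For example, a hypothetical resolve_condition template evaluated against an incoming payload (apply_jinja_template returns the rendered string as its first element, which is then matched against "1"/"true"/"ok" above):

    resolve_condition_template = '{{ 1 if payload.status == "resolved" else 0 }}'
    raw_request_data = {"status": "resolved"}
    is_resolve_signal, _ = apply_jinja_template(resolve_condition_template, payload=raw_request_data)
    # rendered "1" -> "1".strip().lower() in ["1", "true", "ok"] -> True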
+ @staticmethod
+ def insert_demo_uuid(distinction):
+ if distinction is not None:
+ distinction += str(uuid4())
+ else:
+ distinction = str(uuid4())
+
+ return distinction
+
+ @property
+ def skip_signal(self):
+ try:
+ _ = self.migrator_lock
+ return True
+ except Alert.migrator_lock.RelatedObjectDoesNotExist:
+ return False
+
+
+def listen_for_alert_model_save(sender, instance, created, *args, **kwargs):
+    """
+    Here we invoke AlertShootingStep via the model save action.
+    """
+    AlertGroup = apps.get_model("alerts", "AlertGroup")
+    if created and instance.group.maintenance_uuid is None and not instance.skip_signal:
+        # RFCT - why additional save?
+        instance.save()
+
+ group = instance.group
+ # Store exact alert which resolved group.
+ if group.resolved_by == AlertGroup.SOURCE and group.resolved_by_alert is None:
+ group.resolved_by_alert = instance
+ group.save(update_fields=["resolved_by_alert"])
+
+ if settings.DEBUG:
+ distribute_alert(instance.pk)
+ else:
+ distribute_alert.apply_async((instance.pk,), countdown=TASK_DELAY_SECONDS)
+
+ logger.info(f"Recalculate AG cache. Reason: save alert model {instance.pk}")
+ transaction.on_commit(instance.group.schedule_cache_for_web)
+
+
+# Connect signal to base Alert class
+post_save.connect(listen_for_alert_model_save, Alert)
+
+# And subscribe for events from child classes
+for subclass in Alert.__subclasses__():
+ post_save.connect(listen_for_alert_model_save, subclass)
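A sketch of why the __subclasses__() loop matters: Django sends post_save with the proxy class as sender, so connecting the handler only to Alert would miss saves made through proxies such as AlertForAlertManager (see alert_manager_models.py):

    from apps.alerts.models import AlertForAlertManager

    # alert_id is hypothetical; saving through the proxy still fires
    # listen_for_alert_model_save, with sender=AlertForAlertManager
    alert = AlertForAlertManager.objects.get(pk=alert_id)
    alert.save()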
diff --git a/engine/apps/alerts/models/alert_group.py b/engine/apps/alerts/models/alert_group.py
new file mode 100644
index 0000000000..72e93ebc65
--- /dev/null
+++ b/engine/apps/alerts/models/alert_group.py
@@ -0,0 +1,1616 @@
+import logging
+from collections import namedtuple
+from typing import Optional
+from urllib.parse import urljoin
+from uuid import uuid1
+
+import pytz
+from celery import uuid as celery_uuid
+from django.apps import apps
+from django.conf import settings
+from django.core.cache import cache
+from django.core.validators import MinLengthValidator
+from django.db import IntegrityError, models, transaction
+from django.db.models import JSONField, Q, QuerySet
+from django.db.models.signals import post_save
+from django.dispatch import receiver
+from django.utils import timezone
+from django.utils.functional import cached_property
+
+from apps.alerts.escalation_snapshot import EscalationSnapshotMixin
+from apps.alerts.incident_appearance.renderers.constants import DEFAULT_BACKUP_TITLE
+from apps.alerts.incident_appearance.renderers.slack_renderer import AlertGroupSlackRenderer
+from apps.alerts.incident_log_builder import IncidentLogBuilder
+from apps.alerts.signals import alert_group_action_triggered_signal
+from apps.alerts.tasks import (
+ acknowledge_reminder_task,
+ call_ack_url,
+ schedule_cache_for_alert_group,
+ send_alert_group_signal,
+ unsilence_task,
+)
+from apps.slack.slack_formatter import SlackFormatter
+from apps.user_management.models import User
+from common.mixins.use_random_readonly_db_manager_mixin import UseRandomReadonlyDbManagerMixin
+from common.public_primary_keys import generate_public_primary_key, increase_public_primary_key_length
+from common.utils import clean_markup, str_or_backup
+
+from .alert_group_counter import AlertGroupCounter
+
+logger = logging.getLogger(__name__)
+logger.setLevel(logging.DEBUG)
+
+
+def generate_public_primary_key_for_alert_group():
+ prefix = "I"
+ new_public_primary_key = generate_public_primary_key(prefix)
+
+ failure_counter = 0
+ while AlertGroup.all_objects.filter(public_primary_key=new_public_primary_key).exists():
+ new_public_primary_key = increase_public_primary_key_length(
+ failure_counter=failure_counter, prefix=prefix, model_name="AlertGroup"
+ )
+ failure_counter += 1
+
+ return new_public_primary_key
+
+
+class AlertGroupQuerySet(models.QuerySet):
+ def create(self, **kwargs):
+ organization = kwargs["channel"].organization
+
+ inside_organization_number = AlertGroupCounter.objects.get_value(organization=organization) + 1
+ return super().create(**kwargs, inside_organization_number=inside_organization_number)
+
+ def get_or_create_grouping(self, channel, channel_filter, group_data):
+ """
+        This method is similar to the default Django QuerySet.get_or_create(); please see the original get_or_create method.
+        The difference is that this method tries to get an object using multiple queries with different filters.
+        Also, "create" is invoked without transaction.atomic to reduce the number of ConcurrentUpdateErrors that can be
+        raised in AlertGroupQuerySet.create() due to the optimistic locking of the AlertGroupCounter model.
+ """
+ search_params = {
+ "channel": channel,
+ "channel_filter": channel_filter,
+ "distinction": group_data.group_distinction,
+ }
+
+ # Try to return the last open group
+ # Note that (channel, channel_filter, distinction, is_open_for_grouping) is in unique_together
+ try:
+ return self.get(**search_params, is_open_for_grouping=True), False
+ except self.model.DoesNotExist:
+ pass
+
+ # If it's an "OK" alert, try to return the latest resolved group
+ if group_data.is_resolve_signal:
+ try:
+ return self.filter(**search_params, resolved=True).latest(), False
+ except self.model.DoesNotExist:
+ pass
+
+ # Create a new group if we couldn't group it to any existing ones
+ try:
+ return (
+ self.create(**search_params, is_open_for_grouping=True, verbose_name=group_data.group_verbose_name),
+ True,
+ )
+ except IntegrityError:
+ try:
+ return self.get(**search_params, is_open_for_grouping=True), False
+ except self.model.DoesNotExist:
+ pass
+ raise
+
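A minimal sketch of the race this handles (channel, channel_filter and group_data are as in the method above; imagine two workers processing identical alerts concurrently):

    # Both workers miss the initial get() and call create(); the unique_together on
    # (channel, channel_filter, distinction, is_open_for_grouping) lets only one INSERT
    # succeed, and the loser catches IntegrityError and falls back to get().
    group_a, created_a = AlertGroup.all_objects.get_or_create_grouping(
        channel=channel, channel_filter=channel_filter, group_data=group_data
    )
    group_b, created_b = AlertGroup.all_objects.get_or_create_grouping(
        channel=channel, channel_filter=channel_filter, group_data=group_data
    )
    assert group_a.pk == group_b.pk  # both alerts end up in the same group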
+
+class UnarchivedAlertGroupQuerySet(models.QuerySet):
+ def filter(self, *args, **kwargs):
+ return super().filter(*args, **kwargs, is_archived=False)
+
+
+class AlertGroupManager(UseRandomReadonlyDbManagerMixin, models.Manager):
+ pass
+
+
+class AlertGroupSlackRenderingMixin:
+ """
+    Ideally this mixin should not exist. Instead, an instance of AlertGroupSlackRenderer should be created and used,
+    but Slack rendering is distributed throughout the codebase.
+ """
+
+ @cached_property
+ def slack_renderer(self):
+ return AlertGroupSlackRenderer(self)
+
+ def render_slack_attachments(self):
+ return self.slack_renderer.render_alert_group_attachments()
+
+ def render_slack_blocks(self):
+ return self.slack_renderer.render_alert_group_blocks()
+
+ @property
+ def slack_templated_first_alert(self):
+ return self.slack_renderer.alert_renderer.templated_alert
+
+
+class AlertGroup(AlertGroupSlackRenderingMixin, EscalationSnapshotMixin, models.Model):
+ all_objects = AlertGroupManager.from_queryset(AlertGroupQuerySet)()
+ unarchived_objects = AlertGroupManager.from_queryset(UnarchivedAlertGroupQuerySet)()
+
+ (
+ NEW,
+ ACKNOWLEDGED,
+ RESOLVED,
+ SILENCED,
+ ) = range(4)
+
+ # exists for status filter in API
+ STATUS_CHOICES = ((NEW, "New"), (ACKNOWLEDGED, "Acknowledged"), (RESOLVED, "Resolved"), (SILENCED, "Silenced"))
+
+ GroupData = namedtuple(
+ "GroupData", ["is_resolve_signal", "group_distinction", "group_verbose_name", "is_acknowledge_signal"]
+ )
+
+ SOURCE, USER, NOT_YET, LAST_STEP, ARCHIVED, WIPED, DISABLE_MAINTENANCE = range(7)
+ SOURCE_CHOICES = (
+ (SOURCE, "source"),
+ (USER, "user"),
+ (NOT_YET, "not yet"),
+ (LAST_STEP, "last escalation step"),
+ (ARCHIVED, "archived"),
+ (WIPED, "wiped"),
+ (DISABLE_MAINTENANCE, "stop maintenance"),
+ )
+
+ ACKNOWLEDGE = "acknowledge"
+ RESOLVE = "resolve"
+ SILENCE = "silence"
+ RESTART = "restart"
+
+ BULK_ACTIONS = [
+ ACKNOWLEDGE,
+ RESOLVE,
+ SILENCE,
+ RESTART,
+ ]
+
+ public_primary_key = models.CharField(
+ max_length=20,
+ validators=[MinLengthValidator(settings.PUBLIC_PRIMARY_KEY_MIN_LENGTH + 1)],
+ unique=True,
+ default=generate_public_primary_key_for_alert_group,
+ )
+
+ channel = models.ForeignKey(
+ "alerts.AlertReceiveChannel",
+ on_delete=models.CASCADE,
+ related_name="alert_groups",
+ )
+
+    # Distinction is what separates groups inside the same channel.
+    # For example, different types of alerts from the same channel should go to different groups;
+    # distinction is what describes that difference.
+ distinction = models.CharField(max_length=100, null=True, default=None, db_index=True)
+ verbose_name = models.TextField(null=True, default=None)
+
+ inside_organization_number = models.IntegerField(default=0)
+
+ channel_filter = models.ForeignKey(
+ "alerts.ChannelFilter",
+ on_delete=models.SET_DEFAULT,
+ related_name="alert_groups",
+ null=True,
+ default=None,
+ )
+
+ resolved = models.BooleanField(default=False)
+
+ resolved_by = models.IntegerField(choices=SOURCE_CHOICES, default=NOT_YET)
+ resolved_by_user = models.ForeignKey(
+ "user_management.User",
+ on_delete=models.SET_NULL,
+ null=True,
+ default=None,
+ related_name="resolved_alert_groups",
+ )
+
+ resolved_by_alert = models.ForeignKey(
+ "alerts.Alert",
+ on_delete=models.SET_NULL,
+ null=True,
+ default=None,
+ related_name="resolved_alert_groups",
+ )
+
+ resolved_at = models.DateTimeField(blank=True, null=True)
+ acknowledged = models.BooleanField(default=False)
+ acknowledged_on_source = models.BooleanField(default=False)
+ acknowledged_at = models.DateTimeField(blank=True, null=True)
+ acknowledged_by = models.IntegerField(choices=SOURCE_CHOICES, default=NOT_YET)
+ acknowledged_by_user = models.ForeignKey(
+ "user_management.User",
+ on_delete=models.SET_NULL,
+ null=True,
+ default=None,
+ )
+ acknowledged_by_confirmed = models.DateTimeField(null=True, default=None)
+
+ is_escalation_finished = models.BooleanField(default=False)
+ started_at = models.DateTimeField(auto_now_add=True)
+
+ slack_message_sent = models.BooleanField(default=False)
+
+ active_escalation_id = models.CharField(max_length=100, null=True, default=None) # ID generated by celery
+ active_resolve_calculation_id = models.CharField(max_length=100, null=True, default=None) # ID generated by celery
+ # ID generated by celery
+ active_cache_for_web_calculation_id = models.CharField(max_length=100, null=True, default=None)
+
+ SILENCE_DELAY_OPTIONS = (
+ (1800, "30 minutes"),
+ (3600, "1 hour"),
+ (14400, "4 hours"),
+ (43200, "12 hours"),
+ (57600, "16 hours"),
+ (72000, "20 hours"),
+ (86400, "24 hours"),
+ (-1, "Forever"),
+ )
+ silenced = models.BooleanField(default=False)
+ silenced_at = models.DateTimeField(null=True)
+ silenced_by_user = models.ForeignKey(
+ "user_management.User",
+ on_delete=models.SET_NULL,
+ null=True,
+ default=None,
+ related_name="silenced_alert_groups",
+ )
+ silenced_until = models.DateTimeField(blank=True, null=True)
+ unsilence_task_uuid = models.CharField(max_length=100, null=True, default=None)
+
+ @property
+ def is_silenced_forever(self):
+ return self.silenced and self.silenced_until is None
+
+ @property
+ def is_silenced_for_period(self):
+ return self.silenced and self.silenced_until is not None
+
+ @property
+ def status(self):
+ if self.resolved:
+ return AlertGroup.RESOLVED
+ elif self.acknowledged:
+ return AlertGroup.ACKNOWLEDGED
+ elif self.silenced:
+ return AlertGroup.SILENCED
+ else:
+ return AlertGroup.NEW
+
+ ACCOUNT_INACTIVE, CHANNEL_ARCHIVED, NO_REASON, RATE_LIMITED, CHANNEL_NOT_SPECIFIED, RESTRICTED_ACTION = range(6)
+ REASONS_TO_SKIP_ESCALATIONS = (
+ (ACCOUNT_INACTIVE, "account_inactive"),
+ (CHANNEL_ARCHIVED, "channel_archived"),
+ (NO_REASON, "no_reason"),
+ (RATE_LIMITED, "rate_limited"),
+ (CHANNEL_NOT_SPECIFIED, "channel_not_specified"),
+ (RESTRICTED_ACTION, "restricted_action"),
+ )
+ reason_to_skip_escalation = models.IntegerField(choices=REASONS_TO_SKIP_ESCALATIONS, default=NO_REASON)
+
+ SEVERITY_HIGH, SEVERITY_LOW, SEVERITY_NONE = range(3)
+ SEVERITY_CHOICES = (
+ (SEVERITY_HIGH, "high"),
+ (SEVERITY_LOW, "low"),
+ (SEVERITY_NONE, "none"),
+ )
+ manual_severity = models.IntegerField(choices=SEVERITY_CHOICES, default=SEVERITY_NONE)
+
+ resolution_note_ts = models.CharField(max_length=100, null=True, default=None)
+
+ root_alert_group = models.ForeignKey(
+ "alerts.AlertGroup",
+ on_delete=models.SET_NULL,
+ null=True,
+ default=None,
+ related_name="dependent_alert_groups",
+ )
+
+ cached_render_for_web = JSONField(default=dict)
+
+ last_unique_unacknowledge_process_id = models.CharField(max_length=100, null=True, default=None)
+ is_archived = models.BooleanField(default=False)
+
+ wiped_at = models.DateTimeField(null=True, default=None)
+ wiped_by = models.ForeignKey(
+ "user_management.User", on_delete=models.SET_NULL, null=True, default=None, related_name="wiped_by_user"
+ )
+
+ slack_message = models.OneToOneField(
+ "slack.SlackMessage",
+ on_delete=models.SET_NULL,
+ null=True,
+ default=None,
+ related_name="_alert_group",
+ )
+
+ slack_log_message = models.OneToOneField(
+ "slack.SlackMessage",
+ on_delete=models.SET_NULL,
+ null=True,
+ default=None,
+ )
+
+ prevent_posting_alerts = models.BooleanField(default=False)
+ maintenance_uuid = models.CharField(max_length=100, unique=True, null=True, default=None)
+
+ raw_escalation_snapshot = JSONField(null=True, default=None)
+ estimate_escalation_finish_time = models.DateTimeField(null=True, default=None)
+
+ # This field is used for constraints so we can use get_or_create() in concurrent calls
+ # https://docs.djangoproject.com/en/3.2/ref/models/querysets/#get-or-create
+ # Combined with unique_together below, it allows only one alert group with
+ # the combination (alert_receive_channel_id, channel_filter_id, distinction, is_open_for_grouping=True)
+ # If is_open_for_grouping=None, then we can have as many combinations of
+ # (alert_receive_channel_id, channel_filter_id, distinction, is_open_for_grouping=None) as we want
+ # We just don't care about that because we'll use only get_or_create(...is_open_for_grouping=True...)
+ # https://code.djangoproject.com/ticket/28545
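+ #
+ # Sketch of the grouping call this enables (field names as used elsewhere in this app):
+ # AlertGroup.all_objects.get_or_create(
+ # channel=alert_receive_channel,
+ # channel_filter=channel_filter,
+ # distinction=group_distinction,
+ # is_open_for_grouping=True,
+ # )
+ # Concurrent deliveries of the same alert race safely: the unique constraint lets only
+ # one row be created, and the losing callers receive the existing open group.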
+ is_open_for_grouping = models.BooleanField(default=None, null=True, blank=True)
+
+ class Meta:
+ get_latest_by = "pk"
+ unique_together = [
+ "channel_id",
+ "channel_filter_id",
+ "distinction",
+ "is_open_for_grouping",
+ ]
+
+ def __str__(self):
+ return f"{self.pk}: {self.verbose_name}"
+
+ @property
+ def is_maintenance_incident(self):
+ return self.maintenance_uuid is not None
+
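+ # The maintenance_uuid may belong to an integration or to the whole organization;
+ # try both, and if neither matches, resolve this (maintenance) alert group directly.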
+ def stop_maintenance(self, user: User) -> None:
+ Organization = apps.get_model("user_management", "Organization")
+ AlertReceiveChannel = apps.get_model("alerts", "AlertReceiveChannel")
+
+ try:
+ integration_on_maintenance = AlertReceiveChannel.objects.get(maintenance_uuid=self.maintenance_uuid)
+ integration_on_maintenance.force_disable_maintenance(user)
+ return
+ except AlertReceiveChannel.DoesNotExist:
+ pass
+
+ try:
+ organization_on_maintenance = Organization.objects.get(maintenance_uuid=self.maintenance_uuid)
+ organization_on_maintenance.force_disable_maintenance(user)
+ return
+ except Organization.DoesNotExist:
+ pass
+
+ self.resolve_by_disable_maintenance()
+
+ @property
+ def skip_escalation_in_slack(self):
+ return self.reason_to_skip_escalation in (
+ AlertGroup.CHANNEL_ARCHIVED,
+ AlertGroup.ACCOUNT_INACTIVE,
+ AlertGroup.RATE_LIMITED,
+ AlertGroup.CHANNEL_NOT_SPECIFIED,
+ )
+
+ def is_alert_a_resolve_signal(self, alert):
+ raise NotImplementedError
+
+ def cache_for_web(self, organization):
+ from apps.api.serializers.alert_group import AlertGroupSerializer
+
+ # Re-fetch the object so the write goes through the master DB connection rather than the read-only replica.
+ _self = AlertGroup.all_objects.get(pk=self.pk)
+ _self.cached_render_for_web = AlertGroupSerializer(self, context={"organization": organization}).data
+ self.cached_render_for_web = _self.cached_render_for_web
+ _self.save(update_fields=["cached_render_for_web"])
+
+ def schedule_cache_for_web(self):
+ schedule_cache_for_alert_group.apply_async((self.pk,))
+
+ @property
+ def permalink(self):
+ if self.slack_message is not None:
+ return self.slack_message.permalink
+
+ @property
+ def web_link(self):
+ return urljoin(self.channel.organization.web_link, f"?page=incident&id={self.public_primary_key}")
+
+ @property
+ def alerts_count(self):
+ return self.alerts.count()
+
+ @property
+ def happened_while_maintenance(self):
+ return self.root_alert_group is not None and self.root_alert_group.maintenance_uuid is not None
+
+ def acknowledge_by_user(self, user: User, action_source: Optional[str] = None) -> None:
+ AlertGroupLogRecord = apps.get_model("alerts", "AlertGroupLogRecord")
+ logger.debug(f"Started acknowledge_by_user for alert_group {self.pk}")
+ # if incident was silenced or resolved, unsilence/unresolve it without starting escalation
+ if self.silenced:
+ self.un_silence()
+ self.log_records.create(
+ type=AlertGroupLogRecord.TYPE_UN_SILENCE,
+ author=user,
+ silence_delay=None,
+ reason="Acknowledge button",
+ )
+ if self.resolved:
+ self.unresolve()
+ self.log_records.create(type=AlertGroupLogRecord.TYPE_UN_RESOLVED, author=user, reason="Acknowledge button")
+
+ # clear resolve report cache
+ cache_key = "render_after_resolve_report_json_{}".format(self.pk)
+ cache.delete(cache_key)
+
+ self.acknowledge(acknowledged_by_user=user, acknowledged_by=AlertGroup.USER)
+ self.stop_escalation()
+ if self.is_root_alert_group:
+ self.start_ack_reminder(user)
+
+ if self.can_call_ack_url:
+ self.start_call_ack_url()
+
+ log_record = self.log_records.create(type=AlertGroupLogRecord.TYPE_ACK, author=user)
+
+ logger.debug(
+ f"send alert_group_action_triggered_signal for alert_group {self.pk}, "
+ f"log record {log_record.pk} with type '{log_record.get_type_display()}', action source: {action_source}"
+ )
+
+ alert_group_action_triggered_signal.send(
+ sender=self.acknowledge_by_user,
+ log_record=log_record.pk,
+ action_source=action_source,
+ )
+
+ for dependent_alert_group in self.dependent_alert_groups.all():
+ dependent_alert_group.acknowledge_by_user(user, action_source=action_source)
+
+ logger.debug(f"Finished acknowledge_by_user for alert_group {self.pk}")
+
+ def acknowledge_by_source(self):
+ AlertGroupLogRecord = apps.get_model("alerts", "AlertGroupLogRecord")
+
+ # if incident was silenced, unsilence it without starting escalation
+ if self.silenced:
+ self.un_silence()
+ self.log_records.create(
+ type=AlertGroupLogRecord.TYPE_UN_SILENCE,
+ silence_delay=None,
+ reason="Acknowledge by source",
+ )
+ self.acknowledge(acknowledged_by=AlertGroup.SOURCE)
+ self.stop_escalation()
+
+ log_record = self.log_records.create(type=AlertGroupLogRecord.TYPE_ACK)
+
+ logger.debug(
+ f"send alert_group_action_triggered_signal for alert_group {self.pk}, "
+ f"log record {log_record.pk} with type '{log_record.get_type_display()}', action source: alert"
+ )
+
+ alert_group_action_triggered_signal.send(
+ sender=self.acknowledge_by_source,
+ log_record=log_record.pk,
+ action_source=None,
+ )
+
+ for dependent_alert_group in self.dependent_alert_groups.all():
+ dependent_alert_group.acknowledge_by_source()
+
+ def un_acknowledge_by_user(self, user: User, action_source: Optional[str] = None) -> None:
+ AlertGroupLogRecord = apps.get_model("alerts", "AlertGroupLogRecord")
+
+ logger.debug(f"Started un_acknowledge_by_user for alert_group {self.pk}")
+ self.unacknowledge()
+ if self.is_root_alert_group:
+ self.start_escalation_if_needed()
+
+ log_record = self.log_records.create(type=AlertGroupLogRecord.TYPE_UN_ACK, author=user)
+
+ logger.debug(
+ f"send alert_group_action_triggered_signal for alert_group {self.pk}, "
+ f"log record {log_record.pk} with type '{log_record.get_type_display()}', action source: {action_source}"
+ )
+
+ alert_group_action_triggered_signal.send(
+ sender=self.un_acknowledge_by_user,
+ log_record=log_record.pk,
+ action_source=action_source,
+ )
+
+ for dependent_alert_group in self.dependent_alert_groups.all():
+ dependent_alert_group.un_acknowledge_by_user(user, action_source=action_source)
+ logger.debug(f"Finished un_acknowledge_by_user for alert_group {self.pk}")
+
+ def resolve_by_user(self, user: User, action_source: Optional[str] = None) -> None:
+ AlertGroupLogRecord = apps.get_model("alerts", "AlertGroupLogRecord")
+
+ # if incident was silenced, unsilence it without starting escalation
+ if self.silenced:
+ self.un_silence()
+ self.log_records.create(
+ type=AlertGroupLogRecord.TYPE_UN_SILENCE,
+ author=user,
+ silence_delay=None,
+ reason="Resolve button",
+ )
+ self.resolve(resolved_by=AlertGroup.USER, resolved_by_user=user)
+ self.stop_escalation()
+ log_record = self.log_records.create(type=AlertGroupLogRecord.TYPE_RESOLVED, author=user)
+
+ logger.debug(
+ f"send alert_group_action_triggered_signal for alert_group {self.pk}, "
+ f"log record {log_record.pk} with type '{log_record.get_type_display()}', action source: {action_source}"
+ )
+
+ alert_group_action_triggered_signal.send(
+ sender=self.resolve_by_user,
+ log_record=log_record.pk,
+ action_source=action_source,
+ )
+
+ for dependent_alert_group in self.dependent_alert_groups.all():
+ dependent_alert_group.resolve_by_user(user, action_source=action_source)
+
+ def resolve_by_source(self):
+ AlertGroupLogRecord = apps.get_model("alerts", "AlertGroupLogRecord")
+ # if incident was silenced, unsilence it without starting escalation
+ if self.silenced:
+ self.un_silence()
+ self.log_records.create(
+ type=AlertGroupLogRecord.TYPE_UN_SILENCE,
+ silence_delay=None,
+ reason="Resolve by source",
+ )
+ self.resolve(resolved_by=AlertGroup.SOURCE)
+ self.stop_escalation()
+ log_record = self.log_records.create(type=AlertGroupLogRecord.TYPE_RESOLVED)
+
+ logger.debug(
+ f"send alert_group_action_triggered_signal for alert_group {self.pk}, "
+ f"log record {log_record.pk} with type '{log_record.get_type_display()}', action source: alert"
+ )
+
+ alert_group_action_triggered_signal.send(
+ sender=self.resolve_by_source,
+ log_record=log_record.pk,
+ action_source=None,
+ )
+
+ for dependent_alert_group in self.dependent_alert_groups.all():
+ dependent_alert_group.resolve_by_source()
+
+ def resolve_by_archivation(self):
+ AlertGroupLogRecord = apps.get_model("alerts", "AlertGroupLogRecord")
+ # if incident was silenced, unsilence it without starting escalation
+ if self.silenced:
+ self.un_silence()
+ self.log_records.create(
+ type=AlertGroupLogRecord.TYPE_UN_SILENCE,
+ silence_delay=None,
+ reason="Resolve by archivation",
+ )
+ self.archive()
+ self.stop_escalation()
+ if not self.resolved:
+ self.resolve(resolved_by=AlertGroup.ARCHIVED)
+
+ log_record = self.log_records.create(type=AlertGroupLogRecord.TYPE_RESOLVED)
+
+ logger.debug(
+ f"send alert_group_action_triggered_signal for alert_group {self.pk}, "
+ f"log record {log_record.pk} with type '{log_record.get_type_display()}', action source: archivation"
+ )
+
+ alert_group_action_triggered_signal.send(
+ sender=self.resolve_by_archivation,
+ log_record=log_record.pk,
+ action_source=None,
+ )
+
+ for dependent_alert_group in self.dependent_alert_groups.all():
+ dependent_alert_group.resolve_by_archivation()
+
+ def resolve_by_last_step(self):
+ AlertGroupLogRecord = apps.get_model("alerts", "AlertGroupLogRecord")
+
+ self.resolve(resolved_by=AlertGroup.LAST_STEP)
+ self.stop_escalation()
+ log_record = self.log_records.create(type=AlertGroupLogRecord.TYPE_RESOLVED)
+
+ logger.debug(
+ f"send alert_group_action_triggered_signal for alert_group {self.pk}, "
+ f"log record {log_record.pk} with type '{log_record.get_type_display()}', action source: resolve step"
+ )
+
+ alert_group_action_triggered_signal.send(
+ sender=self.resolve_by_last_step,
+ log_record=log_record.pk,
+ action_source=None,
+ )
+
+ for dependent_alert_group in self.dependent_alert_groups.all():
+ dependent_alert_group.resolve_by_last_step()
+
+ def resolve_by_disable_maintenance(self):
+ AlertGroupLogRecord = apps.get_model("alerts", "AlertGroupLogRecord")
+
+ self.resolve(resolved_by=AlertGroup.DISABLE_MAINTENANCE)
+ self.stop_escalation()
+ log_record = self.log_records.create(type=AlertGroupLogRecord.TYPE_RESOLVED)
+
+ logger.debug(
+ f"send alert_group_action_triggered_signal for alert_group {self.pk}, "
+ f"log record {log_record.pk} with type '{log_record.get_type_display()}', "
+ f"action source: disable maintenance"
+ )
+
+ alert_group_action_triggered_signal.send(
+ sender=self.resolve_by_disable_maintenance,
+ log_record=log_record.pk,
+ action_source=None,
+ )
+
+ for dependent_alert_group in self.dependent_alert_groups.all():
+ dependent_alert_group.resolve_by_disable_maintenance()
+
+ def un_resolve_by_user(self, user: User, action_source: Optional[str] = None) -> None:
+ AlertGroupLogRecord = apps.get_model("alerts", "AlertGroupLogRecord")
+
+ if self.wiped_at is None:
+ self.unresolve()
+ log_record = self.log_records.create(type=AlertGroupLogRecord.TYPE_UN_RESOLVED, author=user)
+
+ # clear resolve report cache
+ self.drop_cached_after_resolve_report_json()
+
+ if self.is_root_alert_group:
+ self.start_escalation_if_needed()
+
+ logger.debug(
+ f"send alert_group_action_triggered_signal for alert_group {self.pk}, "
+ f"log record {log_record.pk} with type '{log_record.get_type_display()}', "
+ f"action source: {action_source}"
+ )
+
+ alert_group_action_triggered_signal.send(
+ sender=self.un_resolve_by_user,
+ log_record=log_record.pk,
+ action_source=action_source,
+ )
+
+ for dependent_alert_group in self.dependent_alert_groups.all():
+ dependent_alert_group.un_resolve_by_user(user, action_source=action_source)
+
+ def attach_by_user(self, user: User, root_alert_group: "AlertGroup", action_source: Optional[str] = None) -> None:
+ AlertGroupLogRecord = apps.get_model("alerts", "AlertGroupLogRecord")
+
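+ # Only a group that is itself unattached and unresolved can serve as a root;
+ # on success, mirror the root's acknowledge/silence state onto this group.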
+ if root_alert_group.root_alert_group is None and not root_alert_group.resolved:
+ self.root_alert_group = root_alert_group
+ self.save(update_fields=["root_alert_group"])
+ self.stop_escalation()
+ if root_alert_group.acknowledged and not self.acknowledged:
+ self.acknowledge_by_user(user, action_source=action_source)
+ elif not root_alert_group.acknowledged and self.acknowledged:
+ self.un_acknowledge_by_user(user, action_source=action_source)
+
+ if root_alert_group.silenced and not self.silenced:
+ self.silence_by_user(user, action_source=action_source, silence_delay=None)
+
+ if not root_alert_group.silenced and self.silenced:
+ self.un_silence_by_user(user, action_source=action_source)
+
+ log_record = self.log_records.create(
+ type=AlertGroupLogRecord.TYPE_ATTACHED,
+ author=user,
+ root_alert_group=root_alert_group,
+ reason="Attach dropdown",
+ )
+
+ logger.debug(
+ f"send alert_group_action_triggered_signal for alert_group {self.pk}, "
+ f"log record {log_record.pk} with type '{log_record.get_type_display()}', "
+ f"action source: {action_source}"
+ )
+
+ alert_group_action_triggered_signal.send(
+ sender=self.attach_by_user,
+ log_record=log_record.pk,
+ action_source=action_source,
+ )
+
+ log_record_for_root_incident = root_alert_group.log_records.create(
+ type=AlertGroupLogRecord.TYPE_ATTACHED,
+ author=user,
+ dependent_alert_group=self,
+ reason="Attach dropdown",
+ )
+
+ logger.debug(
+ f"send alert_group_action_triggered_signal for alert_group {root_alert_group.pk}, "
+ f"log record {log_record_for_root_incident.pk} with type "
+ f"'{log_record_for_root_incident.get_type_display()}', action source: {action_source}"
+ )
+
+ alert_group_action_triggered_signal.send(
+ sender=self.attach_by_user,
+ log_record=log_record_for_root_incident.pk,
+ action_source=action_source,
+ )
+
+ else:
+ log_record = self.log_records.create(
+ type=AlertGroupLogRecord.TYPE_FAILED_ATTACHMENT,
+ author=user,
+ root_alert_group=root_alert_group,
+ reason="Failed to attach dropdown",
+ )
+
+ logger.debug(
+ f"send alert_group_action_triggered_signal for alert_group {self.pk}, "
+ f"log record {log_record.pk} with type '{log_record.get_type_display()}', "
+ f"action source: {action_source}"
+ )
+
+ alert_group_action_triggered_signal.send(
+ sender=self.attach_by_user,
+ log_record=log_record.pk,
+ action_source=action_source,
+ )
+
+ def un_attach_by_user(self, user: User, action_source: Optional[str] = None) -> None:
+ AlertGroupLogRecord = apps.get_model("alerts", "AlertGroupLogRecord")
+
+ root_alert_group = self.root_alert_group
+ self.root_alert_group = None
+ self.save(update_fields=["root_alert_group"])
+
+ self.start_escalation_if_needed()
+
+ log_record = self.log_records.create(
+ type=AlertGroupLogRecord.TYPE_UNATTACHED,
+ author=user,
+ root_alert_group=root_alert_group,
+ reason="Unattach button",
+ )
+
+ logger.debug(
+ f"send alert_group_action_triggered_signal for alert_group {self.pk}, "
+ f"log record {log_record.pk} with type '{log_record.get_type_display()}', "
+ f"action source: {action_source}"
+ )
+
+ alert_group_action_triggered_signal.send(
+ sender=self.un_attach_by_user,
+ log_record=log_record.pk,
+ action_source=action_source,
+ )
+
+ log_record_for_root_incident = root_alert_group.log_records.create(
+ type=AlertGroupLogRecord.TYPE_UNATTACHED,
+ author=user,
+ dependent_alert_group=self,
+ reason="Unattach dropdown",
+ )
+
+ logger.debug(
+ f"send alert_group_action_triggered_signal for alert_group {root_alert_group.pk}, "
+ f"log record {log_record_for_root_incident.pk} "
+ f"with type '{log_record_for_root_incident.get_type_display()}', action source: {action_source}"
+ )
+
+ alert_group_action_triggered_signal.send(
+ sender=self.un_attach_by_user,
+ log_record=log_record_for_root_incident.pk,
+ action_source=action_source,
+ )
+
+ def un_attach_by_delete(self):
+ AlertGroupLogRecord = apps.get_model("alerts", "AlertGroupLogRecord")
+
+ self.root_alert_group = None
+ self.save(update_fields=["root_alert_group"])
+
+ self.start_escalation_if_needed()
+
+ log_record = self.log_records.create(
+ type=AlertGroupLogRecord.TYPE_UNATTACHED,
+ reason="Unattach by deleting root incident",
+ )
+
+ logger.debug(
+ f"send alert_group_action_triggered_signal for alert_group {self.pk}, "
+ f"log record {log_record.pk} with type '{log_record.get_type_display()}', "
+ f"action source: delete"
+ )
+
+ alert_group_action_triggered_signal.send(
+ sender=self.un_attach_by_delete,
+ log_record=log_record.pk,
+ action_source=None,
+ )
+
+ def silence_by_user(self, user: User, silence_delay: Optional[int], action_source: Optional[str] = None) -> None:
+ AlertGroupLogRecord = apps.get_model("alerts", "AlertGroupLogRecord")
+ if self.resolved:
+ self.unresolve()
+ self.log_records.create(type=AlertGroupLogRecord.TYPE_UN_RESOLVED, author=user, reason="Silence button")
+
+ # clear resolve report cache
+ cache_key = "render_after_resolve_report_json_{}".format(self.pk)
+ cache.delete(cache_key)
+
+ if self.acknowledged:
+ self.unacknowledge()
+ self.log_records.create(type=AlertGroupLogRecord.TYPE_UN_ACK, author=user, reason="Silence button")
+
+ if self.silenced:
+ self.un_silence()
+ self.log_records.create(
+ type=AlertGroupLogRecord.TYPE_UN_SILENCE,
+ author=user,
+ silence_delay=None,
+ reason="Silence button",
+ )
+
+ now = timezone.now()
+
+ if silence_delay is not None and silence_delay > 0:
+ silence_delay_timedelta = timezone.timedelta(seconds=silence_delay)
+ silenced_until = now + silence_delay_timedelta
+ if self.is_root_alert_group:
+ self.start_unsilence_task(countdown=silence_delay)
+ else:
+ silence_delay_timedelta = None
+ silenced_until = None
+
+ self.silence(silenced_at=now, silenced_until=silenced_until, silenced_by_user=user)
+
+ log_record = self.log_records.create(
+ type=AlertGroupLogRecord.TYPE_SILENCE,
+ author=user,
+ silence_delay=silence_delay_timedelta,
+ reason="Silence button",
+ )
+
+ logger.debug(
+ f"send alert_group_action_triggered_signal for alert_group {self.pk}, "
+ f"log record {log_record.pk} with type '{log_record.get_type_display()}', "
+ f"action source: {action_source}"
+ )
+
+ alert_group_action_triggered_signal.send(
+ sender=self.silence_by_user,
+ log_record=log_record.pk,
+ action_source=action_source,
+ )
+ for dependent_alert_group in self.dependent_alert_groups.all():
+ dependent_alert_group.silence_by_user(user, silence_delay, action_source)
+
+ def un_silence_by_user(self, user: User, action_source: Optional[str] = None) -> None:
+ AlertGroupLogRecord = apps.get_model("alerts", "AlertGroupLogRecord")
+
+ self.un_silence()
+ if self.is_root_alert_group:
+ self.start_escalation_if_needed()
+
+ log_record = self.log_records.create(
+ type=AlertGroupLogRecord.TYPE_UN_SILENCE,
+ author=user,
+ silence_delay=None,
+ # NOTE: it looks like some time ago there was no TYPE_UN_SILENCE
+ reason="Unsilence button",
+ )
+
+ logger.debug(
+ f"send alert_group_action_triggered_signal for alert_group {self.pk}, "
+ f"log record {log_record.pk} with type '{log_record.get_type_display()}', "
+ f"action source: {action_source}"
+ )
+
+ alert_group_action_triggered_signal.send(
+ sender=self.un_silence_by_user,
+ log_record=log_record.pk,
+ action_source=action_source,
+ )
+ for dependent_alert_group in self.dependent_alert_groups.all():
+ dependent_alert_group.un_silence_by_user(user, action_source=action_source)
+
+ def wipe_by_user(self, user: User) -> None:
+ AlertGroupLogRecord = apps.get_model("alerts", "AlertGroupLogRecord")
+
+ if not self.wiped_at:
+ self.resolve(resolved_by=AlertGroup.WIPED)
+ self.stop_escalation()
+ self.distinction = ""
+ self.verbose_name = "Wiped incident"
+ self.wiped_at = timezone.now()
+ self.wiped_by = user
+ for alert in self.alerts.all():
+ alert.wipe(wiped_by=self.wiped_by, wiped_at=self.wiped_at)
+
+ self.save(update_fields=["distinction", "verbose_name", "wiped_at", "wiped_by"])
+
+ log_record = self.log_records.create(
+ type=AlertGroupLogRecord.TYPE_WIPED,
+ author=user,
+ )
+
+ logger.debug(
+ f"send alert_group_action_triggered_signal for alert_group {self.pk}, "
+ f"log record {log_record.pk} with type '{log_record.get_type_display()}', "
+ f"action source: wipe"
+ )
+
+ alert_group_action_triggered_signal.send(
+ sender=self.wipe_by_user,
+ log_record=log_record.pk,
+ action_source=None,
+ )
+
+ for dependent_alert_group in self.dependent_alert_groups.all():
+ dependent_alert_group.wipe_by_user(user)
+
+ def delete_by_user(self, user: User):
+ AlertGroupLogRecord = apps.get_model("alerts", "AlertGroupLogRecord")
+ self.stop_escalation()
+ # Prevent creating multiple logs: use filter() instead of get_or_create(), since
+ # several logs of this type may already exist after an earlier failed deletion.
+ log_record = self.log_records.filter(type=AlertGroupLogRecord.TYPE_DELETED).last()
+
+ if not log_record:
+ log_record = self.log_records.create(
+ type=AlertGroupLogRecord.TYPE_DELETED,
+ author=user,
+ )
+
+ logger.debug(
+ f"send alert_group_action_triggered_signal for alert_group {self.pk}, "
+ f"log record {log_record.pk} with type '{log_record.get_type_display()}', "
+ f"action source: delete"
+ )
+
+ alert_group_action_triggered_signal.send(
+ sender=self.delete_by_user,
+ log_record=log_record.pk,
+ action_source=None, # TODO: Action source is none - it is suspicious
+ # this flag forces a synchronous call to the action handler in representatives
+ # (for now this is only relevant for the Slack representative)
+ force_sync=True,
+ )
+
+ dependent_alerts = list(self.dependent_alert_groups.all())
+
+ self.hard_delete()
+
+ for dependent_alert_group in dependent_alerts: # unattach dependent incidents
+ dependent_alert_group.un_attach_by_delete()
+
+ def hard_delete(self):
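+ # Delete related rows explicitly (including soft-deleted resolution notes) before deleting the group row itself.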
+ ResolutionNote = apps.get_model("alerts", "ResolutionNote")
+
+ alerts = self.alerts.all()
+ alerts.delete()
+
+ self.slack_messages.all().delete()
+ self.personal_log_records.all().delete()
+ self.log_records.all().delete()
+ self.invitations.all().delete()
+ resolution_notes = ResolutionNote.objects_with_deleted.filter(alert_group=self)
+ resolution_notes.delete()
+ self.resolution_note_slack_messages.all().delete()
+ self.delete()
+
+ @staticmethod
+ def bulk_acknowledge(user: User, alert_groups: "QuerySet[AlertGroup]") -> None:
+ AlertGroupLogRecord = apps.get_model("alerts", "AlertGroupLogRecord")
+ root_alert_groups_to_acknowledge = alert_groups.filter(
+ ~Q(acknowledged=True, resolved=False), # don't need to ack acknowledged incidents once again
+ root_alert_group__isnull=True,
+ maintenance_uuid__isnull=True, # don't ack maintenance incident
+ )
+ # Find all dependent alert_groups to update them in one query
+ dependent_alert_groups_to_acknowledge = AlertGroup.all_objects.filter(
+ root_alert_group__in=root_alert_groups_to_acknowledge
+ )
+ alert_groups_to_acknowledge = root_alert_groups_to_acknowledge | dependent_alert_groups_to_acknowledge
+
+ # unresolve alert groups that were resolved, so that the log is built correctly.
+ alert_groups_to_unresolve_before_acknowledge = alert_groups_to_acknowledge.filter(resolved=True)
+
+ # unsilence alert groups that were silenced, so that the log is built correctly.
+ alert_groups_to_unsilence_before_acknowledge = alert_groups_to_acknowledge.filter(silenced=True)
+
+ # convert current qs to list to prevent changes by update
+ alert_groups_to_acknowledge_list = list(alert_groups_to_acknowledge)
+ alert_groups_to_unresolve_before_acknowledge_list = list(alert_groups_to_unresolve_before_acknowledge)
+ alert_groups_to_unsilence_before_acknowledge_list = list(alert_groups_to_unsilence_before_acknowledge)
+
+ alert_groups_to_acknowledge.update(
+ acknowledged=True,
+ resolved=False,
+ resolved_at=None,
+ resolved_by=AlertGroup.NOT_YET,
+ resolved_by_user=None,
+ silenced_until=None,
+ silenced_by_user=None,
+ silenced_at=None,
+ silenced=False,
+ acknowledged_at=timezone.now(),
+ acknowledged_by_user=user,
+ acknowledged_by=AlertGroup.USER,
+ is_escalation_finished=True,
+ )
+
+ for alert_group in alert_groups_to_unresolve_before_acknowledge_list:
+ alert_group.log_records.create(
+ type=AlertGroupLogRecord.TYPE_UN_RESOLVED,
+ author=user,
+ reason="Bulk action acknowledge",
+ )
+ # clear resolve report cache
+ alert_group.drop_cached_after_resolve_report_json()
+
+ for alert_group in alert_groups_to_unsilence_before_acknowledge_list:
+ alert_group.log_records.create(
+ type=AlertGroupLogRecord.TYPE_UN_SILENCE, author=user, reason="Bulk action acknowledge"
+ )
+
+ for alert_group in alert_groups_to_acknowledge_list:
+
+ if alert_group.is_root_alert_group:
+ alert_group.start_ack_reminder(user)
+
+ if alert_group.can_call_ack_url:
+ alert_group.start_call_ack_url()
+
+ log_record = alert_group.log_records.create(type=AlertGroupLogRecord.TYPE_ACK, author=user)
+ send_alert_group_signal.apply_async((log_record.pk,))
+
+ @staticmethod
+ def bulk_resolve(user: User, alert_groups: "QuerySet[AlertGroup]") -> None:
+ AlertGroupLogRecord = apps.get_model("alerts", "AlertGroupLogRecord")
+
+ # stop maintenance for maintenance incidents
+ alert_groups_to_stop_maintenance = alert_groups.filter(resolved=False, maintenance_uuid__isnull=False)
+ for alert_group in alert_groups_to_stop_maintenance:
+ alert_group.stop_maintenance(user)
+
+ root_alert_groups_to_resolve = alert_groups.filter(
+ resolved=False,
+ root_alert_group__isnull=True,
+ maintenance_uuid__isnull=True,
+ )
+ if root_alert_groups_to_resolve.count() == 0:
+ return
+
+ organization = root_alert_groups_to_resolve.first().channel.organization
+ if organization.is_resolution_note_required:
+ root_alert_groups_to_resolve = root_alert_groups_to_resolve.filter(
+ Q(resolution_notes__isnull=False, resolution_notes__deleted_at=None)
+ )
+ dependent_alert_groups_to_resolve = AlertGroup.all_objects.filter(
+ root_alert_group__in=root_alert_groups_to_resolve
+ )
+ alert_groups_to_resolve = root_alert_groups_to_resolve | dependent_alert_groups_to_resolve
+
+ # unsilence alert groups that were silenced, so that the log is built correctly.
+ alert_groups_to_unsilence_before_resolve = alert_groups_to_resolve.filter(silenced=True)
+
+ # convert current qs to list to prevent changes by update
+ alert_groups_to_resolve_list = list(alert_groups_to_resolve)
+ alert_groups_to_unsilence_before_resolve_list = list(alert_groups_to_unsilence_before_resolve)
+
+ alert_groups_to_resolve.update(
+ resolved=True,
+ resolved_at=timezone.now(),
+ is_open_for_grouping=None,
+ resolved_by_user=user,
+ resolved_by=AlertGroup.USER,
+ is_escalation_finished=True,
+ silenced_until=None,
+ silenced_by_user=None,
+ silenced_at=None,
+ silenced=False,
+ )
+
+ for alert_group in alert_groups_to_unsilence_before_resolve_list:
+ alert_group.log_records.create(
+ type=AlertGroupLogRecord.TYPE_UN_SILENCE, author=user, reason="Bulk action resolve"
+ )
+
+ for alert_group in alert_groups_to_resolve_list:
+ log_record = alert_group.log_records.create(type=AlertGroupLogRecord.TYPE_RESOLVED, author=user)
+ send_alert_group_signal.apply_async((log_record.pk,))
+
+ @staticmethod
+ def bulk_restart(user: User, alert_groups: "QuerySet[AlertGroup]") -> None:
+ AlertGroupLogRecord = apps.get_model("alerts", "AlertGroupLogRecord")
+
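+ # Restart undoes whichever terminal state each group is in: acknowledged groups are
+ # unacknowledged, resolved groups unresolved, silenced groups unsilenced; all three
+ # sets are then reset in a single update() below.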
+ root_alert_groups_unack = alert_groups.filter(
+ resolved=False,
+ acknowledged=True,
+ root_alert_group__isnull=True,
+ maintenance_uuid__isnull=True, # don't restart maintenance incident
+ )
+ dependent_alert_groups_unack = AlertGroup.all_objects.filter(root_alert_group__in=root_alert_groups_unack)
+ alert_groups_to_restart_unack = root_alert_groups_unack | dependent_alert_groups_unack
+
+ root_alert_groups_unresolve = alert_groups.filter(resolved=True, root_alert_group__isnull=True)
+ dependent_alert_groups_unresolve = AlertGroup.all_objects.filter(
+ root_alert_group__in=root_alert_groups_unresolve
+ )
+ alert_groups_to_restart_unresolve = root_alert_groups_unresolve | dependent_alert_groups_unresolve
+
+ alert_groups_to_restart_unsilence = alert_groups.filter(
+ resolved=False,
+ acknowledged=False,
+ silenced=True,
+ root_alert_group__isnull=True,
+ )
+
+ # convert current qs to list to prevent changes by update
+ alert_groups_to_restart_unack_list = list(alert_groups_to_restart_unack)
+ alert_groups_to_restart_unresolve_list = list(alert_groups_to_restart_unresolve)
+ alert_groups_to_restart_unsilence_list = list(alert_groups_to_restart_unsilence)
+
+ alert_groups_to_restart = (
+ alert_groups_to_restart_unack | alert_groups_to_restart_unresolve | alert_groups_to_restart_unsilence
+ )
+
+ alert_groups_to_restart.update(
+ acknowledged=False,
+ acknowledged_at=None,
+ acknowledged_by_user=None,
+ acknowledged_by=AlertGroup.NOT_YET,
+ resolved=False,
+ resolved_at=None,
+ is_open_for_grouping=None,
+ resolved_by_user=None,
+ resolved_by=AlertGroup.NOT_YET,
+ silenced_until=None,
+ silenced_by_user=None,
+ silenced_at=None,
+ silenced=False,
+ )
+
+ # unresolve alert groups
+ for alert_group in alert_groups_to_restart_unresolve_list:
+ log_record = alert_group.log_records.create(
+ type=AlertGroupLogRecord.TYPE_UN_RESOLVED,
+ author=user,
+ reason="Bulk action restart",
+ )
+
+ alert_group.drop_cached_after_resolve_report_json()
+
+ if alert_group.is_root_alert_group:
+ alert_group.start_escalation_if_needed()
+
+ send_alert_group_signal.apply_async((log_record.pk,))
+
+ # unacknowledge alert groups
+ for alert_group in alert_groups_to_restart_unack_list:
+ log_record = alert_group.log_records.create(
+ type=AlertGroupLogRecord.TYPE_UN_ACK,
+ author=user,
+ reason="Bulk action restart",
+ )
+
+ if alert_group.is_root_alert_group:
+ alert_group.start_escalation_if_needed()
+
+ send_alert_group_signal.apply_async((log_record.pk,))
+
+ # unsilence alert groups
+ for alert_group in alert_groups_to_restart_unsilence_list:
+ log_record = alert_group.log_records.create(
+ type=AlertGroupLogRecord.TYPE_UN_SILENCE, author=user, reason="Bulk action restart"
+ )
+ alert_group.start_escalation_if_needed()
+
+ send_alert_group_signal.apply_async((log_record.pk,))
+
+ @staticmethod
+ def bulk_silence(user: User, alert_groups: "QuerySet[AlertGroup]", silence_delay: int) -> None:
+ AlertGroupLogRecord = apps.get_model("alerts", "AlertGroupLogRecord")
+
+ now = timezone.now()
+ silence_for_period = silence_delay is not None and silence_delay > 0
+
+ if silence_for_period:
+ silence_delay_timedelta = timezone.timedelta(seconds=silence_delay)
+ silenced_until = now + silence_delay_timedelta
+ else:
+ silence_delay_timedelta = None
+ silenced_until = None
+
+ root_alert_groups_to_silence = alert_groups.filter(
+ root_alert_group__isnull=True,
+ maintenance_uuid__isnull=True, # don't silence maintenance incident
+ )
+ dependent_alert_groups_to_silence = alert_groups.filter(root_alert_group__in=root_alert_groups_to_silence)
+ alert_groups_to_silence = root_alert_groups_to_silence | dependent_alert_groups_to_silence
+ alert_groups_to_unsilence_before_silence = alert_groups_to_silence.filter(
+ silenced=True, acknowledged=False, resolved=False
+ )
+ alert_groups_to_unacknowledge_before_silence = alert_groups_to_silence.filter(resolved=False, acknowledged=True)
+ alert_groups_to_unresolve_before_silence = alert_groups_to_silence.filter(resolved=True)
+
+ # convert current qs to list to prevent changes by update
+ alert_groups_to_silence_list = list(alert_groups_to_silence)
+ alert_groups_to_unsilence_before_silence_list = list(alert_groups_to_unsilence_before_silence)
+ alert_groups_to_unacknowledge_before_silence_list = list(alert_groups_to_unacknowledge_before_silence)
+ alert_groups_to_unresolve_before_silence_list = list(alert_groups_to_unresolve_before_silence)
+
+ if silence_for_period:
+ alert_groups_to_silence.update(
+ acknowledged=False,
+ acknowledged_at=None,
+ acknowledged_by_user=None,
+ acknowledged_by=AlertGroup.NOT_YET,
+ resolved=False,
+ resolved_at=None,
+ resolved_by_user=None,
+ resolved_by=AlertGroup.NOT_YET,
+ silenced=True,
+ silenced_at=now,
+ silenced_until=silenced_until,
+ silenced_by_user=user,
+ )
+ else:
+ alert_groups_to_silence.update(
+ acknowledged=False,
+ acknowledged_at=None,
+ acknowledged_by_user=None,
+ acknowledged_by=AlertGroup.NOT_YET,
+ resolved=False,
+ resolved_at=None,
+ resolved_by_user=None,
+ resolved_by=AlertGroup.NOT_YET,
+ silenced=True,
+ silenced_at=now,
+ silenced_until=silenced_until,
+ silenced_by_user=user,
+ is_escalation_finished=True,
+ )
+
+ for alert_group in alert_groups_to_unresolve_before_silence_list:
+ alert_group.log_records.create(
+ type=AlertGroupLogRecord.TYPE_UN_RESOLVED,
+ author=user,
+ reason="Bulk action silence",
+ )
+ alert_group.drop_cached_after_resolve_report_json()
+
+ for alert_group in alert_groups_to_unsilence_before_silence_list:
+ alert_group.log_records.create(
+ type=AlertGroupLogRecord.TYPE_UN_SILENCE,
+ author=user,
+ reason="Bulk action silence",
+ )
+
+ for alert_group in alert_groups_to_unacknowledge_before_silence_list:
+ alert_group.log_records.create(
+ type=AlertGroupLogRecord.TYPE_UN_ACK,
+ author=user,
+ reason="Bulk action silence",
+ )
+
+ for alert_group in alert_groups_to_silence_list:
+ log_record = alert_group.log_records.create(
+ type=AlertGroupLogRecord.TYPE_SILENCE,
+ author=user,
+ silence_delay=silence_delay_timedelta,
+ reason="Bulk action silence",
+ )
+
+ send_alert_group_signal.apply_async((log_record.pk,))
+ if silence_for_period and alert_group.is_root_alert_group:
+ alert_group.start_unsilence_task(countdown=silence_delay)
+
+ def start_ack_reminder(self, user: User):
+ Organization = apps.get_model("user_management", "Organization")
+ unique_unacknowledge_process_id = uuid1()
+ logger.info(
+ f"AlertGroup acknowledged by user with pk "
+ f"{user.pk}, "
+ f"acknowledge timeout task has been started with process id {unique_unacknowledge_process_id}"
+ )
+
+ seconds = Organization.ACKNOWLEDGE_REMIND_DELAY[self.channel.organization.acknowledge_remind_timeout]
+ if seconds > 0:
+ delay = timezone.timedelta(seconds=seconds).total_seconds()
+ acknowledge_reminder_task.apply_async(
+ (
+ self.pk,
+ unique_unacknowledge_process_id,
+ ),
+ countdown=delay,
+ )
+ self.last_unique_unacknowledge_process_id = unique_unacknowledge_process_id
+ self.save(update_fields=["last_unique_unacknowledge_process_id"])
+
+ def start_call_ack_url(self):
+ get_ack_url = self.alerts.first().integration_unique_data.get("ack_url_get", None)
+ channel_id = self.slack_message.channel_id if self.slack_message is not None else None
+ if get_ack_url and not self.acknowledged_on_source:
+ call_ack_url.apply_async(
+ (get_ack_url, self.pk, channel_id),
+ )
+ post_ack_url = self.alerts.first().integration_unique_data.get("ack_url_post", None)
+ if post_ack_url and not self.acknowledged_on_source:
+ call_ack_url.apply_async(
+ (post_ack_url, self.pk, channel_id, "POST"),
+ )
+
+ def start_unsilence_task(self, countdown):
+ task_id = celery_uuid()
+ self.unsilence_task_uuid = task_id
+
+ # recalculate finish escalation time
+ escalation_start_time = timezone.now() + timezone.timedelta(seconds=countdown)
+ self.estimate_escalation_finish_time = self.calculate_eta_for_finish_escalation(
+ start_time=escalation_start_time
+ )
+
+ self.save(update_fields=["unsilence_task_uuid", "estimate_escalation_finish_time"])
+ unsilence_task.apply_async((self.pk,), task_id=task_id, countdown=countdown)
+
+ @property
+ def can_call_ack_url(self):
+ return type(self.alerts.first().integration_unique_data) is dict
+
+ @property
+ def is_root_alert_group(self):
+ return self.root_alert_group is None
+
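+ # acknowledge()/resolve()/silence() accept extra field values as kwargs and persist
+ # them in the same save(), e.g. (as used by acknowledge_by_user above):
+ # alert_group.acknowledge(acknowledged_by_user=user, acknowledged_by=AlertGroup.USER)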
+ def acknowledge(self, **kwargs):
+ if not self.acknowledged:
+ self.acknowledged = True
+ self.acknowledged_at = timezone.now()
+
+ for k, v in kwargs.items():
+ setattr(self, k, v)
+
+ self.save(update_fields=["acknowledged", "acknowledged_at", *kwargs.keys()])
+
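+ # State transitions chain downwards: unacknowledge() also un-silences, and
+ # unresolve() also un-acknowledges, so clearing a higher state clears the lower ones.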
+ def unacknowledge(self):
+ self.un_silence()
+ if self.acknowledged:
+ self.acknowledged = False
+ self.acknowledged_at = None
+ self.acknowledged_by_user = None
+ self.acknowledged_by = AlertGroup.NOT_YET
+ self.save(update_fields=["acknowledged", "acknowledged_at", "acknowledged_by_user", "acknowledged_by"])
+
+ def resolve(self, **kwargs):
+ if not self.resolved:
+ self.resolved = True
+ self.resolved_at = timezone.now()
+ self.is_open_for_grouping = None
+
+ for k, v in kwargs.items():
+ setattr(self, k, v)
+
+ self.save(update_fields=["resolved", "resolved_at", "is_open_for_grouping", *kwargs.keys()])
+
+ def unresolve(self):
+ self.unacknowledge()
+ if self.resolved:
+ self.resolved = False
+ self.resolved_at = None
+ self.resolved_by = AlertGroup.NOT_YET
+ self.resolved_by_user = None
+ self.save(update_fields=["resolved", "resolved_at", "resolved_by", "resolved_by_user"])
+
+ def silence(self, **kwargs):
+ if not self.silenced:
+ self.silenced = True
+ if "silenced_at" not in kwargs:
+ kwargs["silenced_at"] = timezone.now()
+
+ for k, v in kwargs.items():
+ setattr(self, k, v)
+
+ self.save(update_fields=["silenced", *kwargs.keys()])
+
+ def un_silence(self):
+ self.silenced_until = None
+ self.silenced_by_user = None
+ self.silenced_at = None
+ self.silenced = False
+ self.unsilence_task_uuid = None
+ self.save(
+ update_fields=["silenced_until", "silenced", "silenced_by_user", "silenced_at", "unsilence_task_uuid"]
+ )
+
+ def archive(self):
+ if self.root_alert_group:
+ self.root_alert_group = None
+ self.is_archived = True
+ self.save(update_fields=["is_archived", "root_alert_group"])
+
+ @property
+ def long_verbose_name(self):
+ title = str_or_backup(self.slack_templated_first_alert.title, DEFAULT_BACKUP_TITLE)
+ return title
+
+ @property
+ def long_verbose_name_without_formatting(self):
+ sf = SlackFormatter(self.channel.organization)
+ title = self.long_verbose_name
+ title = sf.format(title)
+ title = clean_markup(title)
+ return title
+
+ def get_resolve_text(self, mention_user=False):
+ if self.resolved_by == AlertGroup.SOURCE:
+ return "Resolved by alert source"
+ elif self.resolved_by == AlertGroup.ARCHIVED:
+ return "Resolved because alert has been archived"
+ elif self.resolved_by == AlertGroup.LAST_STEP:
+ return "Resolved automatically"
+ elif self.resolved_by == AlertGroup.WIPED:
+ return "Resolved by wipe"
+ elif self.resolved_by == AlertGroup.DISABLE_MAINTENANCE:
+ return "Resolved by stop maintenance"
+ else:
+ if self.resolved_by_user is not None:
+ user_text = self.resolved_by_user.get_user_verbal_for_team_for_slack(mention=mention_user)
+ return f"Resolved by {user_text}"
+ else:
+ return "Resolved"
+
+ def get_acknowledge_text(self, mention_user=False):
+ if self.acknowledged_by == AlertGroup.SOURCE:
+ return "Acknowledged by alert source"
+ elif self.acknowledged_by == AlertGroup.USER and self.acknowledged_by_user is not None:
+ user_text = self.acknowledged_by_user.get_user_verbal_for_team_for_slack(mention=mention_user)
+ return f"Acknowledged by {user_text}"
+ else:
+ return "Acknowledged"
+
+ def non_cached_after_resolve_report_json(self):
+ AlertGroupLogRecord = apps.get_model("alerts", "AlertGroupLogRecord")
+ UserNotificationPolicyLogRecord = apps.get_model("base", "UserNotificationPolicyLogRecord")
+ ResolutionNote = apps.get_model("alerts", "ResolutionNote")
+
+ log_builder = IncidentLogBuilder(self)
+ log_records_list = log_builder.get_log_records_list(with_resolution_notes=True)
+ result_log_report = list()
+
+ for log_record in log_records_list:
+ if type(log_record) == AlertGroupLogRecord:
+ result_log_report.append(log_record.render_log_line_json())
+ elif type(log_record) == UserNotificationPolicyLogRecord:
+ result_log_report.append(log_record.rendered_notification_log_line_json)
+ elif type(log_record) == ResolutionNote:
+ result_log_report.append(log_record.render_log_line_json())
+ return result_log_report
+
+ def render_after_resolve_report_json(self):
+ cache_key = "render_after_resolve_report_json_{}".format(self.pk)
+
+ # cache.get_or_set in some cases returns None, so use get and set cache methods separately
+ log_report = cache.get(cache_key)
+ if log_report is None:
+ log_report = self.non_cached_after_resolve_report_json()
+ cache.set(cache_key, log_report)
+ return log_report
+
+ def drop_cached_after_resolve_report_json(self):
+ cache_key = "render_after_resolve_report_json_{}".format(self.pk)
+ if cache_key in cache:
+ cache.delete(cache_key)
+
+ @property
+ def has_resolution_notes(self):
+ return self.resolution_notes.exists()
+
+ def render_resolution_notes_for_csv_report(self):
+ result = ""
+
+ resolution_notes = self.resolution_notes.all().prefetch_related("resolution_note_slack_message")
+ if len(resolution_notes) > 0:
+ result += "Notes: "
+ result += " ".join(
+ [
+ "{} ({} by {}), ".format(
+ resolution_note.text,
+ resolution_note.created_at.astimezone(pytz.utc),
+ resolution_note.author_verbal(mention=True),
+ )
+ for resolution_note in resolution_notes
+ ]
+ )
+ return result
+
+ @property
+ def state(self):
+ if self.resolved:
+ return "resolved"
+ elif self.acknowledged:
+ return "acknowledged"
+ elif self.silenced:
+ return "silenced"
+ else:
+ return "new"
+
+ @property
+ def notify_in_slack_enabled(self):
+ channel_filter = self.channel_filter_with_respect_to_escalation_snapshot
+ if channel_filter is not None:
+ return channel_filter.notify_in_slack
+ else:
+ return True
+
+ @property
+ def notify_in_telegram_enabled(self):
+ channel_filter = self.channel_filter_with_respect_to_escalation_snapshot
+ if channel_filter is not None:
+ return channel_filter.notify_in_telegram
+ else:
+ return True
+
+ @property
+ def is_presented_in_slack(self):
+ return self.slack_message and self.channel.organization.slack_team_identity
+
+ @property
+ def slack_channel_id(self):
+ slack_channel_id = None
+ if self.channel.organization.slack_team_identity is not None:
+ slack_message = self.get_slack_message()
+ if slack_message is not None:
+ slack_channel_id = slack_message.channel_id
+ elif self.channel_filter is not None:
+ slack_channel_id = self.channel_filter.slack_channel_id_or_general_log_id
+ return slack_channel_id
+
+ def get_slack_message(self):
+ SlackMessage = apps.get_model("slack", "SlackMessage")
+ if self.slack_message is None:
+ slack_message = SlackMessage.objects.filter(alert_group=self).order_by("created_at").first()
+ return slack_message
+ return self.slack_message
+
+ @cached_property
+ def last_stop_escalation_log(self):
+ AlertGroupLogRecord = apps.get_model("alerts", "AlertGroupLogRecord")
+ stop_escalation_log = (
+ self.log_records.filter(
+ type__in=[
+ AlertGroupLogRecord.TYPE_RESOLVED,
+ AlertGroupLogRecord.TYPE_ACK,
+ AlertGroupLogRecord.TYPE_SILENCE,
+ ]
+ )
+ .order_by("pk")
+ .last()
+ )
+
+ return stop_escalation_log
+
+
+@receiver(post_save, sender=AlertGroup)
+def listen_for_alert_group_model_save(sender, instance, created, *args, **kwargs):
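+ # Re-render the web cache after targeted saves, except when the save itself wrote
+ # cached_render_for_web (that would re-schedule the caching task in a loop).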
+ if (
+ kwargs is not None
+ and "update_fields" in kwargs
+ and kwargs["update_fields"] is dict
+ and "cached_render_for_web" not in kwargs["update_fields"]
+ ):
+ transaction.on_commit(instance.schedule_cache_for_web)
diff --git a/engine/apps/alerts/models/alert_group_counter.py b/engine/apps/alerts/models/alert_group_counter.py
new file mode 100644
index 0000000000..c5be00e107
--- /dev/null
+++ b/engine/apps/alerts/models/alert_group_counter.py
@@ -0,0 +1,30 @@
+from django.db import models
+
+
+class ConcurrentUpdateError(Exception):
+ pass
+
+
+class AlertGroupCounterQuerySet(models.QuerySet):
+ def get_value(self, organization):
+ counter, _ = self.get_or_create(organization=organization)
+
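+ # Optimistic locking: the UPDATE below only matches while `value` is unchanged since
+ # the read above, so a concurrent allocation leaves num_updated_rows == 0 and we
+ # raise instead of handing out a duplicate number.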
+ num_updated_rows = self.filter(organization=organization, value=counter.value).update(value=counter.value + 1)
+ if num_updated_rows == 0:
+ raise ConcurrentUpdateError()
+
+ return counter.value
+
+
+class AlertGroupCounter(models.Model):
+ """
+ This model is used to assign unique, increasing inside_organization_number values to alert groups.
+ It uses optimistic locking to get values and raises ConcurrentUpdateError on concurrent updates.
+ This happens on alert group creation, so that Celery workers are not blocked by select_for_update
+ and other tasks still get room to run under a high rate of alert group creation.
+ """
+
+ objects = models.Manager.from_queryset(AlertGroupCounterQuerySet)()
+
+ organization = models.OneToOneField("user_management.Organization", on_delete=models.CASCADE)
+ value = models.PositiveBigIntegerField(default=0)
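+
+# A minimal usage sketch (hypothetical caller, illustrative names): allocate the next
+# number and surface ConcurrentUpdateError so the surrounding Celery task can retry,
+# rather than blocking on select_for_update():
+#
+# def next_inside_organization_number(organization):
+# try:
+# return AlertGroupCounter.objects.get_value(organization) + 1
+# except ConcurrentUpdateError:
+# raise # the task's retry policy re-runs the allocation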
diff --git a/engine/apps/alerts/models/alert_group_log_record.py b/engine/apps/alerts/models/alert_group_log_record.py
new file mode 100644
index 0000000000..7e5e30c938
--- /dev/null
+++ b/engine/apps/alerts/models/alert_group_log_record.py
@@ -0,0 +1,560 @@
+import json
+import logging
+
+import humanize
+from django.apps import apps
+from django.db import models, transaction
+from django.db.models import JSONField
+from django.db.models.signals import post_save
+from django.dispatch import receiver
+from rest_framework.fields import DateTimeField
+
+from apps.alerts.tasks import send_update_log_report_signal
+from apps.alerts.utils import render_relative_timeline
+from apps.slack.slack_formatter import SlackFormatter
+from common.utils import clean_markup
+
+logger = logging.getLogger(__name__)
+logger.setLevel(logging.DEBUG)
+
+
+class AlertGroupLogRecord(models.Model):
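+ # NB: the names below are bound positionally from range(23); the integer values are
+ # persisted in the `type` field, so keep the order stable.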
+ (
+ TYPE_ACK,
+ TYPE_UN_ACK,
+ TYPE_INVITE,
+ TYPE_STOP_INVITATION,
+ TYPE_RE_INVITE,
+ TYPE_ESCALATION_TRIGGERED,
+ TYPE_INVITATION_TRIGGERED,
+ TYPE_SILENCE,
+ TYPE_ATTACHED,
+ TYPE_UNATTACHED,
+ TYPE_CUSTOM_BUTTON_TRIGGERED,
+ TYPE_AUTO_UN_ACK,
+ TYPE_FAILED_ATTACHMENT,
+ TYPE_RESOLVED,
+ TYPE_UN_RESOLVED,
+ TYPE_UN_SILENCE,
+ TYPE_ESCALATION_FINISHED,
+ TYPE_ESCALATION_FAILED,
+ TYPE_ACK_REMINDER_TRIGGERED,
+ TYPE_WIPED,
+ TYPE_DELETED,
+ TYPE_REGISTERED,
+ TYPE_ROUTE_ASSIGNED,
+ ) = range(23)
+
+ TYPES_FOR_LICENCE_CALCULATION = (
+ TYPE_ACK,
+ TYPE_UN_ACK,
+ TYPE_INVITE,
+ TYPE_STOP_INVITATION,
+ TYPE_RE_INVITE,
+ TYPE_SILENCE,
+ TYPE_ATTACHED,
+ TYPE_UNATTACHED,
+ TYPE_CUSTOM_BUTTON_TRIGGERED,
+ TYPE_FAILED_ATTACHMENT,
+ TYPE_RESOLVED,
+ TYPE_UN_RESOLVED,
+ TYPE_UN_SILENCE,
+ )
+
+ TYPE_CHOICES = (
+ (TYPE_ACK, "Acknowledged"),
+ (TYPE_UN_ACK, "Unacknowledged"),
+ (TYPE_INVITE, "Invite"),
+ (TYPE_STOP_INVITATION, "Stop invitation"),
+ (TYPE_RE_INVITE, "Re-invite"),
+ (TYPE_ESCALATION_TRIGGERED, "Escalation triggered"),
+ (TYPE_INVITATION_TRIGGERED, "Invitation triggered"),
+ (TYPE_ESCALATION_FINISHED, "Escalation finished"),
+ (TYPE_SILENCE, "Silenced"),
+ (TYPE_UN_SILENCE, "Unsilenced"),
+ (TYPE_ATTACHED, "Attached"),
+ (TYPE_UNATTACHED, "Unattached"),
+ (TYPE_CUSTOM_BUTTON_TRIGGERED, "Custom button triggered"),
+ (TYPE_AUTO_UN_ACK, "Unacknowledged by timeout"),
+ (TYPE_FAILED_ATTACHMENT, "Failed attachment"),
+ (TYPE_RESOLVED, "Incident resolved"),
+ (TYPE_UN_RESOLVED, "Incident unresolved"),
+ (TYPE_ESCALATION_FAILED, "Escalation failed"),
+ (TYPE_ACK_REMINDER_TRIGGERED, "Acknowledge reminder triggered"),
+ (TYPE_WIPED, "Wiped"),
+ (TYPE_DELETED, "Deleted"),
+ (TYPE_REGISTERED, "Incident registered"),
+ (TYPE_ROUTE_ASSIGNED, "A route is assigned to the incident"),
+ )
+
+ # Handlers should be named like functions, since these names are used to look up handler methods.
+ ACTIONS_TO_HANDLERS_MAP = {
+ TYPE_ACK: "acknowledge",
+ TYPE_UN_ACK: "un_acknowledge",
+ TYPE_INVITE: "invite",
+ TYPE_STOP_INVITATION: "un_invite",
+ TYPE_RE_INVITE: "re_invite",
+ TYPE_ESCALATION_TRIGGERED: "escalation_triggered",
+ TYPE_INVITATION_TRIGGERED: "invitation_triggered",
+ TYPE_SILENCE: "silence",
+ TYPE_UN_SILENCE: "un_silence",
+ TYPE_ATTACHED: "attach",
+ TYPE_UNATTACHED: "un_attach",
+ TYPE_CUSTOM_BUTTON_TRIGGERED: "custom_button_triggered",
+ TYPE_AUTO_UN_ACK: "auto_un_acknowledge",
+ TYPE_FAILED_ATTACHMENT: "fail_attach",
+ TYPE_RESOLVED: "resolve",
+ TYPE_UN_RESOLVED: "un_resolve",
+ TYPE_ESCALATION_FINISHED: "escalation_finished",
+ TYPE_ESCALATION_FAILED: "escalation_failed",
+ TYPE_ACK_REMINDER_TRIGGERED: "ack_reminder_triggered",
+ TYPE_WIPED: "wiped",
+ TYPE_DELETED: "deleted",
+ }
+ (
+ ERROR_ESCALATION_NOTIFY_USER_NO_RECIPIENT,
+ ERROR_ESCALATION_NOTIFY_QUEUE_NO_RECIPIENTS,
+ ERROR_ESCALATION_NOTIFY_MULTIPLE_NO_RECIPIENTS,
+ ERROR_ESCALATION_SCHEDULE_DOES_NOT_EXIST,
+ ERROR_ESCALATION_SCHEDULE_DOES_NOT_SELECTED,
+ ERROR_ESCALATION_ICAL_IMPORT_FAILED,
+ ERROR_ESCALATION_ICAL_NO_VALID_USERS,
+ ERROR_ESCALATION_NO_SCHEDULE_IN_CHANNEL,
+ ERROR_ESCALATION_WAIT_STEP_IS_NOT_CONFIGURED,
+ ERROR_ESCALATION_NOTIFY_IF_TIME_IS_NOT_CONFIGURED,
+ ERROR_ESCALATION_UNSPECIFIED_STEP,
+ ERROR_ESCALATION_NOTIFY_GROUP_STEP_IS_NOT_CONFIGURED,
+ ERROR_ESCALATION_USER_GROUP_IS_EMPTY,
+ ERROR_ESCALATION_USER_GROUP_DOES_NOT_EXIST,
+ ERROR_ESCALATION_TRIGGER_CUSTOM_BUTTON_STEP_IS_NOT_CONFIGURED,
+ ERROR_ESCALATION_NOTIFY_IN_SLACK,
+ ERROR_ESCALATION_NOTIFY_IF_NUM_ALERTS_IN_WINDOW_STEP_IS_NOT_CONFIGURED,
+ ) = range(17)
+
+ type = models.IntegerField(choices=TYPE_CHOICES)
+
+ author = models.ForeignKey(
+ "user_management.User",
+ on_delete=models.SET_NULL,
+ related_name="log_records",
+ default=None,
+ null=True,
+ )
+
+ escalation_policy = models.ForeignKey(
+ "alerts.EscalationPolicy", on_delete=models.SET_NULL, related_name="log_records", null=True
+ )
+
+ created_at = models.DateTimeField(auto_now_add=True)
+ alert_group = models.ForeignKey(
+ "alerts.AlertGroup",
+ on_delete=models.CASCADE,
+ related_name="log_records",
+ )
+ root_alert_group = models.ForeignKey(
+ "alerts.AlertGroup",
+ on_delete=models.SET_NULL,
+ related_name="root_log_records",
+ default=None,
+ null=True,
+ )
+ dependent_alert_group = models.ForeignKey(
+ "alerts.AlertGroup",
+ on_delete=models.SET_NULL,
+ related_name="dependent_log_records",
+ default=None,
+ null=True,
+ )
+ invitation = models.ForeignKey(
+ "alerts.Invitation",
+ on_delete=models.SET_NULL,
+ related_name="log_records",
+ default=None,
+ null=True,
+ )
+ custom_button = models.ForeignKey(
+ "alerts.CustomButton",
+ on_delete=models.SET_DEFAULT,
+ related_name="log_records",
+ default=None,
+ null=True,
+ )
+ reason = models.TextField(null=True, default=None)
+
+ silence_delay = models.DurationField(default=None, null=True)
+
+ eta = models.DateTimeField(default=None, null=True)
+
+ escalation_error_code = models.PositiveIntegerField(null=True, default=None)
+
+ escalation_policy_step = models.IntegerField(null=True, default=None)
+ step_specific_info = JSONField(null=True, default=None)
+
+ STEP_SPECIFIC_INFO_KEYS = ["schedule_name", "custom_button_name", "usergroup_handle"]
+
+ def render_log_line_json(self):
+ time = humanize.naturaldelta(self.alert_group.started_at - self.created_at)
+ created_at = DateTimeField().to_representation(self.created_at)
+ author = self.author.short() if self.author is not None else None
+
+ sf = SlackFormatter(self.alert_group.channel.organization)
+ action = sf.format(self.rendered_log_line_action(substitute_author_with_tag=True))
+ action = clean_markup(action)
+
+ result = {
+ "time": time,
+ "action": action,
+ "realm": "alert_group",
+ "type": self.type,
+ "created_at": created_at,
+ "author": author,
+ }
+ return result
+
+ def rendered_incident_log_line(self, for_slack=False, html=False):
+ timeline = render_relative_timeline(self.created_at, self.alert_group.started_at)
+
+ if html:
+ result = f"{timeline}: "
+ else:
+ result = f"*{timeline}:* "
+
+ result += self.rendered_log_line_action(for_slack=for_slack, html=html)
+ return result
+
+ def rendered_log_line_action(self, for_slack=False, html=False, substitute_author_with_tag=False):
+ EscalationPolicy = apps.get_model("alerts", "EscalationPolicy")
+
+ result = ""
+ author_name = None
+ invitee_name = None
+ escalation_policy_step = None
+ step_specific_info = self.get_step_specific_info()
+
+ if self.escalation_policy_step is not None:
+ escalation_policy_step = self.escalation_policy_step
+ elif self.escalation_policy is not None:
+ escalation_policy_step = self.escalation_policy.step
+
+ if self.author is not None:
+ if substitute_author_with_tag:
+ author_name = "{{author}}"
+ elif for_slack:
+ author_name = self.author.get_user_verbal_for_team_for_slack()
+ else:
+ author_name = self.author.username
+ if self.invitation is not None:
+ if for_slack:
+ invitee_name = self.invitation.invitee.get_user_verbal_for_team_for_slack()
+ else:
+ invitee_name = self.invitation.invitee.username
+
+ if self.type == AlertGroupLogRecord.TYPE_REGISTERED:
+ result += "alert group registered"
+ elif self.type == AlertGroupLogRecord.TYPE_ROUTE_ASSIGNED:
+ channel_filter = self.alert_group.channel_filter_with_respect_to_escalation_snapshot
+ escalation_chain = self.alert_group.escalation_chain_with_respect_to_escalation_snapshot
+
+ if channel_filter is not None:
+ result += f'alert group assigned to route "{channel_filter.str_for_clients}"'
+
+ if escalation_chain is not None:
+ result += f' with escalation chain "{escalation_chain.name}"'
+ else:
+ result += f" with no escalation chain, skipping escalation"
+ else:
+ result += "alert group assigned to deleted route, skipping escalation"
+ elif self.type == AlertGroupLogRecord.TYPE_ACK:
+ result += f"acknowledged by {f'{author_name}' if author_name else 'alert source'}"
+ elif self.type == AlertGroupLogRecord.TYPE_UN_ACK:
+ result += f"unacknowledged by {author_name}"
+ elif self.type == AlertGroupLogRecord.TYPE_AUTO_UN_ACK:
+ result += "unacknowledged automatically"
+ elif self.type == AlertGroupLogRecord.TYPE_INVITE:
+ result += f"{author_name} activated invitation for {invitee_name}"
+ elif self.type == AlertGroupLogRecord.TYPE_STOP_INVITATION:
+ if self.invitation.invitee == self.author:
+ result += f"{author_name} deactivated invitation"
+ else:
+ result += f"{author_name} deactivated invitation for {invitee_name}"
+ elif self.type == AlertGroupLogRecord.TYPE_RE_INVITE:
+ result += f"{author_name} restarted invitation for {invitee_name}"
+ elif self.type == AlertGroupLogRecord.TYPE_INVITATION_TRIGGERED:
+ pass # moved to UserNotificationPolicyLogRecord
+ elif self.type == AlertGroupLogRecord.TYPE_ESCALATION_TRIGGERED:
+ if escalation_policy_step == EscalationPolicy.STEP_NOTIFY_IF_TIME:
+ if self.eta is not None:
+ if for_slack:
+ result += "escalation stopped until ".format(
+ self.eta.timestamp()
+ )
+ else:
+ result += f"escalation stopped until {self.eta.strftime('%B %d %Y %H:%M:%S')} (UTC)"
+ else:
+ result += 'triggered step "Continue escalation if time"'
+ elif escalation_policy_step == EscalationPolicy.STEP_NOTIFY_IF_NUM_ALERTS_IN_TIME_WINDOW:
+ is_step_configured = (
+ self.escalation_policy is not None
+ and self.escalation_policy.num_alerts_in_window is not None
+ and self.escalation_policy.num_minutes_in_window is not None
+ )
+
+ if is_step_configured:
+ num_alerts_in_window = self.escalation_policy.num_alerts_in_window
+ num_minutes_in_window = self.escalation_policy.num_minutes_in_window
+ result += (
+ f'triggered step "Continue escalation if >{num_alerts_in_window} alerts '
+ f'per {num_minutes_in_window} minutes"'
+ )
+ else:
+ result += 'triggered step "Continue escalation if >X alerts per Y minutes"'
+
+ elif escalation_policy_step in [
+ EscalationPolicy.STEP_NOTIFY_GROUP,
+ EscalationPolicy.STEP_NOTIFY_GROUP_IMPORTANT,
+ ]:
+ usergroup_handle = ""
+ if step_specific_info is not None:
+ usergroup_handle = step_specific_info.get("usergroup_handle", "")
+ elif self.escalation_policy is not None and self.escalation_policy.notify_to_group is not None:
+ usergroup_handle = self.escalation_policy.notify_to_group.handle
+ important_text = ""
+ if escalation_policy_step == EscalationPolicy.STEP_NOTIFY_GROUP_IMPORTANT:
+ important_text = " (Important)"
+ result += f'triggered step "Notify @{usergroup_handle} User Group{important_text}"'
+ elif escalation_policy_step in [
+ EscalationPolicy.STEP_NOTIFY_SCHEDULE,
+ EscalationPolicy.STEP_NOTIFY_SCHEDULE_IMPORTANT,
+ ]:
+ schedule_name = None
+ if step_specific_info is not None:
+ schedule_name = step_specific_info.get("schedule_name", "")
+ elif self.escalation_policy is not None and self.escalation_policy.notify_schedule is not None:
+ schedule_name = self.escalation_policy.notify_schedule.name
+ schedule_name = f"'{schedule_name}'" if schedule_name else ""
+ important_text = ""
+ if escalation_policy_step == EscalationPolicy.STEP_NOTIFY_SCHEDULE_IMPORTANT:
+ important_text = " (Important)"
+ result += f'triggered step "Notify on-call from Schedule {schedule_name}{important_text}"'
+ elif escalation_policy_step == EscalationPolicy.STEP_REPEAT_ESCALATION_N_TIMES:
+ result += "escalation started from the beginning"
+ else:
+ result += f'triggered step "{EscalationPolicy.get_step_display_name(escalation_policy_step)}"'
+ elif self.type == AlertGroupLogRecord.TYPE_SILENCE:
+ if self.silence_delay is None:
+ result += f"silenced by {author_name} forever"
+ else:
+ if self.silence_delay.total_seconds() == 0:
+                    # Before snooze was renamed to silence (and unlimited silence was implemented), a zero delay meant unsnooze
+ result += f"unsilenced by {author_name}"
+ else:
+ result += f"silenced by {author_name} for {humanize.naturaldelta(self.silence_delay)}"
+ elif self.type == AlertGroupLogRecord.TYPE_UN_SILENCE:
+ if self.author is not None:
+ result += f"unsilenced by {author_name}"
+ else:
+ result += "alert group unsilenced"
+ elif self.type == AlertGroupLogRecord.TYPE_ATTACHED:
+ # Log record of dependent alert group
+ if self.root_alert_group:
+ if self.alert_group.slack_message is not None and self.root_alert_group.slack_message is not None:
+ if html:
+ result += (
+ f"attached to "
+ f"{self.root_alert_group.long_verbose_name_without_formatting} by {author_name}"
+ )
+ else:
+ result += (
+ f"attached to <{self.root_alert_group.slack_message.permalink}|"
+ f"{self.root_alert_group.long_verbose_name_without_formatting}> by {author_name}"
+ )
+ else:
+ result += f"attached to {self.root_alert_group.long_verbose_name} by {author_name}"
+ # Log record of root alert group
+ elif self.dependent_alert_group:
+ if self.alert_group.slack_message is not None and self.dependent_alert_group.slack_message is not None:
+ if html:
+ result += (
+ f""
+ f"{self.dependent_alert_group.long_verbose_name_without_formatting} has been attached to this alert "
+ f"by {author_name}"
+ )
+ else:
+ result += (
+ f"<{self.dependent_alert_group.slack_message.permalink}|"
+ f"{self.dependent_alert_group.long_verbose_name_without_formatting}> has been attached to this alert "
+ f"by {author_name or 'maintenance'}"
+ )
+ else:
+ result += (
+ f"{self.dependent_alert_group.long_verbose_name} has been attached to this alert "
+ f"by {author_name or 'maintenance'}"
+ )
+ elif self.type == AlertGroupLogRecord.TYPE_UNATTACHED:
+ if self.root_alert_group:
+ if self.alert_group.slack_message is not None and self.root_alert_group.slack_message is not None:
+ if html:
+ result += (
+ f"unattached from "
+ f"{self.root_alert_group.long_verbose_name_without_formatting} "
+ f"{f' by {author_name}' if author_name else ''}"
+ )
+ else:
+ result += (
+ f"unattached from <{self.root_alert_group.slack_message.permalink}|"
+ f"{self.root_alert_group.long_verbose_name_without_formatting}>"
+ f"{f' by {author_name}' if author_name else ''}"
+ )
+ else:
+ result += (
+ f"unattached from {self.root_alert_group.long_verbose_name}"
+ f"{f' by {author_name}' if author_name else ''}"
+ )
+ elif self.dependent_alert_group:
+ if self.alert_group.slack_message is not None and self.dependent_alert_group.slack_message is not None:
+ if html:
+ result += (
+ f""
+ f"{self.dependent_alert_group.long_verbose_name_without_formatting} has been unattached from this alert"
+ f"{f' by {author_name}' if author_name else ''}"
+ )
+ else:
+ result += (
+ f"<{self.dependent_alert_group.slack_message.permalink}|"
+ f"{self.dependent_alert_group.long_verbose_name_without_formatting}> has been unattached from this alert"
+ f"{f' by {author_name}' if author_name else ''}"
+ )
+ else:
+ result += (
+ f"{self.dependent_alert_group.long_verbose_name} has been unattached from this alert"
+ f"{f' by {author_name}' if author_name else ''}"
+ )
+ elif self.type == AlertGroupLogRecord.TYPE_CUSTOM_BUTTON_TRIGGERED:
+ if step_specific_info is not None:
+ custom_button_name = step_specific_info.get("custom_button_name")
+ custom_button_name = f"`{custom_button_name}`" or ""
+ elif self.custom_button is not None:
+ custom_button_name = f"`{self.custom_button.name}`"
+ else:
+ custom_button_name = ""
+ result += f"outgoing webhook {custom_button_name} triggered by "
+ if self.author:
+ result += f"{author_name}"
+ else:
+ result += "escalation chain"
+ elif self.type == AlertGroupLogRecord.TYPE_FAILED_ATTACHMENT:
+            if self.alert_group.slack_message is not None and self.root_alert_group.slack_message is not None:
+ result += (
+ f"failed to attach to <{self.root_alert_group.slack_message.permalink}|"
+ f"{self.root_alert_group.long_verbose_name_without_formatting}> "
+ f"by {author_name} because it is already attached or resolved."
+ )
+ else:
+ result += (
+ f"failed to attach to {self.root_alert_group.long_verbose_name} by {author_name}"
+ f"because it is already attached or resolved."
+ )
+ elif self.type == AlertGroupLogRecord.TYPE_RESOLVED:
+ result += f"alert group resolved {f'by {author_name}'if author_name else ''}"
+ elif self.type == AlertGroupLogRecord.TYPE_UN_RESOLVED:
+ result += f"unresolved by {author_name}"
+ elif self.type == AlertGroupLogRecord.TYPE_WIPED:
+ result += "wiped"
+ elif self.type == AlertGroupLogRecord.TYPE_ESCALATION_FAILED:
+ if self.escalation_error_code == AlertGroupLogRecord.ERROR_ESCALATION_NOTIFY_USER_NO_RECIPIENT:
+ result += 'skipped escalation step "Notify User" because no users are set'
+ elif self.escalation_error_code == AlertGroupLogRecord.ERROR_ESCALATION_NOTIFY_QUEUE_NO_RECIPIENTS:
+ result += 'skipped escalation step "Notify User (next each time)" because no users are set'
+ elif self.escalation_error_code == AlertGroupLogRecord.ERROR_ESCALATION_NOTIFY_MULTIPLE_NO_RECIPIENTS:
+ result += 'skipped escalation step "Notify multiple Users" because no users are set'
+ elif self.escalation_error_code in [
+ AlertGroupLogRecord.ERROR_ESCALATION_SCHEDULE_DOES_NOT_EXIST,
+ AlertGroupLogRecord.ERROR_ESCALATION_NO_SCHEDULE_IN_CHANNEL,
+ ]:
+ result += 'skipped escalation step "Notify Schedule" because schedule doesn\'t exist'
+ elif self.escalation_error_code == AlertGroupLogRecord.ERROR_ESCALATION_SCHEDULE_DOES_NOT_SELECTED:
+ result += 'skipped escalation step "Notify Schedule" because it is not configured'
+ elif self.escalation_error_code == AlertGroupLogRecord.ERROR_ESCALATION_NOTIFY_GROUP_STEP_IS_NOT_CONFIGURED:
+ result += 'skipped escalation step "Notify Group" because it is not configured'
+ elif (
+ self.escalation_error_code
+ == AlertGroupLogRecord.ERROR_ESCALATION_TRIGGER_CUSTOM_BUTTON_STEP_IS_NOT_CONFIGURED
+ ):
+ result += 'skipped escalation step "Trigger Outgoing Webhook" because it is not configured'
+ elif self.escalation_error_code == AlertGroupLogRecord.ERROR_ESCALATION_NOTIFY_IF_TIME_IS_NOT_CONFIGURED:
+ result += 'skipped escalation step "Continue escalation if time" because it is not configured'
+ elif (
+ self.escalation_error_code
+ == AlertGroupLogRecord.ERROR_ESCALATION_NOTIFY_IF_NUM_ALERTS_IN_WINDOW_STEP_IS_NOT_CONFIGURED
+ ):
+ result += 'skipped escalation step"Continue escalation if >X alerts per Y minutes" because it is not configured'
+ elif self.escalation_error_code == AlertGroupLogRecord.ERROR_ESCALATION_ICAL_IMPORT_FAILED:
+ if self.escalation_policy is not None and self.escalation_policy.notify_schedule is not None:
+ schedule_name = self.escalation_policy.notify_schedule.name
+ schedule_name = f' "{schedule_name}" '
+ else:
+ schedule_name = " "
+ result += f'escalation step "Notify Schedule"{schedule_name} skipped: iCal import was failed.'
+ elif self.escalation_error_code == AlertGroupLogRecord.ERROR_ESCALATION_ICAL_NO_VALID_USERS:
+ if self.escalation_policy is not None and self.escalation_policy.notify_schedule is not None:
+ schedule_name = self.escalation_policy.notify_schedule.name
+ schedule_name = f' "{schedule_name}" '
+ else:
+ schedule_name = " "
+ result += (
+ f'escalation step "Notify Schedule" {schedule_name} skipped:'
+ f" there are no users to notify for this schedule slot."
+ )
+ elif self.escalation_error_code == AlertGroupLogRecord.ERROR_ESCALATION_WAIT_STEP_IS_NOT_CONFIGURED:
+ result += 'escalation step "Wait" is not configured. ' "Default delay is 5 minutes."
+ elif self.escalation_error_code == AlertGroupLogRecord.ERROR_ESCALATION_USER_GROUP_IS_EMPTY:
+                if self.escalation_policy is not None and self.escalation_policy.notify_to_group is not None:
+                    group_name = f" @{self.escalation_policy.notify_to_group.handle} "
+                else:
+                    group_name = " "
+                result += f'escalation step "Notify Group"{group_name}skipped: User Group is empty.'
+ elif self.escalation_error_code == AlertGroupLogRecord.ERROR_ESCALATION_USER_GROUP_DOES_NOT_EXIST:
+ result += 'escalation step "Notify Group" skipped: User Group does not exist.'
+ elif self.escalation_error_code == AlertGroupLogRecord.ERROR_ESCALATION_UNSPECIFIED_STEP:
+ result += "escalation step is unspecified. Skipped"
+ elif self.escalation_error_code == AlertGroupLogRecord.ERROR_ESCALATION_NOTIFY_IN_SLACK:
+ if self.escalation_policy_step == EscalationPolicy.STEP_FINAL_NOTIFYALL:
+ result += "failed to notify channel in Slack"
+ elif self.escalation_policy_step in [
+ EscalationPolicy.STEP_NOTIFY_GROUP,
+ EscalationPolicy.STEP_NOTIFY_GROUP_IMPORTANT,
+ ]:
+ usergroup_handle = None
+ if step_specific_info is not None:
+                    usergroup_handle = step_specific_info.get("usergroup_handle", "")
+ elif self.escalation_policy is not None and self.escalation_policy.notify_to_group is not None:
+ usergroup_handle = self.escalation_policy.notify_to_group.handle
+ usergroup_handle_text = f" @{usergroup_handle}" if usergroup_handle else ""
+ result += f"failed to notify User Group{usergroup_handle_text} in Slack"
+ return result
+
+ def get_step_specific_info(self):
+ step_specific_info = None
+        # in some cases step_specific_info was saved using json.dumps
+ if self.step_specific_info is not None:
+ if isinstance(self.step_specific_info, dict):
+ step_specific_info = self.step_specific_info
+ else:
+ step_specific_info = json.loads(self.step_specific_info)
+ return step_specific_info
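+    # A doctest-style sketch of the normalization above, using a hypothetical record
+    # whose step_specific_info was stored as a JSON string:
+    #   >>> record.step_specific_info = '{"usergroup_handle": "oncall"}'
+    #   >>> record.get_step_specific_info()
+    #   {'usergroup_handle': 'oncall'}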
+
+
+@receiver(post_save, sender=AlertGroupLogRecord)
+def listen_for_alertgrouplogrecord(sender, instance, created, *args, **kwargs):
+ instance.alert_group.drop_cached_after_resolve_report_json()
+ if instance.type != AlertGroupLogRecord.TYPE_DELETED:
+ if not instance.alert_group.is_maintenance_incident:
+ alert_group_pk = instance.alert_group.pk
+ logger.debug(
+ f"send_update_log_report_signal for alert_group {alert_group_pk}, "
+ f"alert group event: {instance.get_type_display()}"
+ )
+ send_update_log_report_signal.apply_async(kwargs={"alert_group_pk": alert_group_pk}, countdown=8)
+
+ logger.info(f"Recalculate AG cache. Reason: save alert_group_log_record model {instance.pk}")
+ transaction.on_commit(instance.alert_group.schedule_cache_for_web)
diff --git a/engine/apps/alerts/models/alert_manager_models.py b/engine/apps/alerts/models/alert_manager_models.py
new file mode 100644
index 0000000000..479e87ccbd
--- /dev/null
+++ b/engine/apps/alerts/models/alert_manager_models.py
@@ -0,0 +1,64 @@
+import hashlib
+import json
+
+from django.db import transaction
+
+from apps.alerts.models import Alert, AlertGroup
+
+
+class AlertGroupForAlertManager(AlertGroup):
+ def is_alert_a_resolve_signal(self, alert):
+ non_resolved_hashes = set()
+        alert_hash = alert.get_integration_optimization_hash()
+        if alert.calculated_is_resolve_signal:
+            # Calculate leftover hashes from the other alerts in the group
+            for other_alert in AlertForAlertManager.objects.filter(group=self).exclude(pk=alert.pk):
+                if other_alert.calculated_is_resolve_signal:
+                    try:
+                        non_resolved_hashes.remove(other_alert.get_integration_optimization_hash())
+                    except KeyError:
+                        pass
+                else:
+                    non_resolved_hashes.add(other_alert.get_integration_optimization_hash())
+            # Remove the hash of the incoming resolve alert itself
+            try:
+                non_resolved_hashes.remove(alert_hash)
+ except KeyError:
+ pass
+ return len(non_resolved_hashes) == 0
+ else:
+ return False
+
+ class Meta:
+ app_label = "alerts"
+ proxy = True
+
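+# How the bookkeeping in is_alert_a_resolve_signal plays out, with hypothetical
+# label hashes h1 and h2 for a group's alerts (in arrival order):
+#   firing(h1), firing(h2), resolved(h1)  -> leftover {h2}, not a group-resolve signal
+#   ...followed by resolved(h2)           -> leftover set empty, resolves the group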
+
+class AlertForAlertManager(Alert):
+ def get_integration_optimization_hash(self):
+ if self.integration_optimization_hash is None:
+ with transaction.atomic():
+ if self.id is not None:
+ alert = AlertForAlertManager.objects.filter(id=self.id).select_for_update().get()
+ else:
+ alert = self
+
+ _hash = dict(alert.raw_request_data.get("labels", {}))
+ _hash = json.dumps(_hash, sort_keys=True)
+                _hash = hashlib.md5(_hash.encode()).hexdigest()
+ alert.integration_optimization_hash = _hash
+
+ if self.id is not None:
+ alert.save()
+
+ return alert.integration_optimization_hash
+ else:
+ return self.integration_optimization_hash
+
+ @property
+ def calculated_is_resolve_signal(self):
+ return self.raw_request_data.get("status", "") == "resolved"
+
+ class Meta:
+ app_label = "alerts"
+ proxy = True
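+
+# A quick sanity check of the hashing above: json.dumps(..., sort_keys=True) makes
+# the digest independent of label ordering (hypothetical labels):
+#   >>> import hashlib, json
+#   >>> a = {"alertname": "InstanceDown", "severity": "critical"}
+#   >>> b = {"severity": "critical", "alertname": "InstanceDown"}
+#   >>> dumps = lambda d: json.dumps(d, sort_keys=True)
+#   >>> hashlib.md5(dumps(a).encode()).hexdigest() == hashlib.md5(dumps(b).encode()).hexdigest()
+#   True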
diff --git a/engine/apps/alerts/models/alert_receive_channel.py b/engine/apps/alerts/models/alert_receive_channel.py
new file mode 100644
index 0000000000..a2a9dad7d7
--- /dev/null
+++ b/engine/apps/alerts/models/alert_receive_channel.py
@@ -0,0 +1,716 @@
+import logging
+from functools import cached_property
+from urllib.parse import urljoin
+
+import emoji
+from celery import uuid as celery_uuid
+from django.apps import apps
+from django.conf import settings
+from django.core.validators import MinLengthValidator
+from django.db import models, transaction
+from django.db.models import Count, Q
+from django.db.models.signals import post_save
+from django.dispatch import receiver
+from django.utils import timezone
+from django.utils.crypto import get_random_string
+from emoji import emojize
+from jinja2 import Template
+
+from apps.alerts.grafana_alerting_sync_manager.grafana_alerting_sync import GrafanaAlertingSyncManager
+from apps.alerts.integration_options_mixin import IntegrationOptionsMixin
+from apps.alerts.models.maintainable_object import MaintainableObject
+from apps.alerts.tasks import (
+ disable_maintenance,
+ invalidate_web_cache_for_alert_group,
+ sync_grafana_alerting_contact_points,
+)
+from apps.base.messaging import get_messaging_backend_from_id
+from apps.base.utils import live_settings
+from apps.integrations.metadata import heartbeat
+from apps.integrations.tasks import create_alert, create_alertmanager_alerts
+from apps.slack.constants import SLACK_RATE_LIMIT_DELAY, SLACK_RATE_LIMIT_TIMEOUT
+from apps.slack.tasks import post_slack_rate_limit_message
+from apps.slack.utils import post_message_to_channel
+from apps.user_management.organization_log_creator import OrganizationLogType, create_organization_log
+from common.exceptions import TeamCanNotBeChangedError, UnableToSendDemoAlert
+from common.public_primary_keys import generate_public_primary_key, increase_public_primary_key_length
+
+logger = logging.getLogger(__name__)
+
+
+def generate_public_primary_key_for_alert_receive_channel():
+ prefix = "C"
+ new_public_primary_key = generate_public_primary_key(prefix)
+
+ failure_counter = 0
+ while AlertReceiveChannel.objects_with_deleted.filter(public_primary_key=new_public_primary_key).exists():
+ new_public_primary_key = increase_public_primary_key_length(
+ failure_counter=failure_counter, prefix=prefix, model_name="AlertReceiveChannel"
+ )
+ failure_counter += 1
+
+ return new_public_primary_key
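+
+# The loop above retries on the rare key collision, letting increase_public_primary_key_length
+# grow the random suffix until the "C"-prefixed key (hypothetical value, e.g. "CX6RR4OJQGAZ5")
+# no longer exists, even among soft-deleted channels.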
+
+
+def random_token_generator():
+ return get_random_string(length=25)
+
+
+def number_to_smiles_translator(number):
+ smiles = [
+ ":blush:",
+ ":ghost:",
+ ":apple:",
+ ":heart:",
+ ":sunglasses:",
+ ":package:",
+ ":balloon:",
+ ":bell:",
+ ":beer:",
+ ":fire:",
+ ]
+ smileset = []
+ first = True
+ while number > 0 or first:
+ smileset.append(smiles[number % 10])
+ number //= 10
+ first = False
+ return "".join(reversed(smileset))
+
+
+class AlertReceiveChannelQueryset(models.QuerySet):
+ def delete(self):
+ self.update(deleted_at=timezone.now())
+
+
+class AlertReceiveChannelManager(models.Manager):
+ def get_queryset(self):
+ return AlertReceiveChannelQueryset(self.model, using=self._db).filter(
+ ~Q(integration=AlertReceiveChannel.INTEGRATION_MAINTENANCE), Q(deleted_at=None)
+ )
+
+ def hard_delete(self):
+ return self.get_queryset().hard_delete()
+
+
+class AlertReceiveChannelManagerWithMaintenance(models.Manager):
+ def get_queryset(self):
+ return AlertReceiveChannelQueryset(self.model, using=self._db).filter(deleted_at=None)
+
+ def hard_delete(self):
+ return self.get_queryset().hard_delete()
+
+
+class AlertReceiveChannel(IntegrationOptionsMixin, MaintainableObject):
+ """
+    Channel created by a user to receive alerts.
+ """
+
+ objects = AlertReceiveChannelManager()
+ objects_with_maintenance = AlertReceiveChannelManagerWithMaintenance()
+ objects_with_deleted = models.Manager()
+
+ public_primary_key = models.CharField(
+ max_length=20,
+ validators=[MinLengthValidator(settings.PUBLIC_PRIMARY_KEY_MIN_LENGTH + 1)],
+ unique=True,
+ default=generate_public_primary_key_for_alert_receive_channel,
+ )
+
+ created_at = models.DateTimeField(auto_now_add=True)
+ deleted_at = models.DateTimeField(blank=True, null=True)
+
+ integration = models.CharField(
+ max_length=100,
+ choices=IntegrationOptionsMixin.INTEGRATION_CHOICES,
+ default=IntegrationOptionsMixin.DEFAULT_INTEGRATION,
+ )
+
+ allow_source_based_resolving = models.BooleanField(default=True)
+
+ token = models.CharField(max_length=30, default=random_token_generator, db_index=True)
+ organization = models.ForeignKey(
+ "user_management.Organization",
+ on_delete=models.CASCADE,
+ related_name="alert_receive_channels",
+ )
+ author = models.ForeignKey(
+ "user_management.User", on_delete=models.SET_NULL, related_name="alert_receive_channels", blank=True, null=True
+ )
+ team = models.ForeignKey(
+ "user_management.Team",
+ on_delete=models.SET_NULL,
+ related_name="alert_receive_channels",
+ null=True,
+ default=None,
+ )
+
+ smile_code = models.TextField(default=":slightly_smiling_face:")
+
+ verbal_name = models.CharField(max_length=150, null=True, default=None)
+
+ integration_slack_channel_id = models.CharField(max_length=150, null=True, default=None)
+
+ is_finished_alerting_setup = models.BooleanField(default=False)
+
+ slack_title_template = models.TextField(null=True, default=None)
+ slack_message_template = models.TextField(null=True, default=None)
+ slack_image_url_template = models.TextField(null=True, default=None)
+
+ sms_title_template = models.TextField(null=True, default=None)
+
+ phone_call_title_template = models.TextField(null=True, default=None)
+
+ web_title_template = models.TextField(null=True, default=None)
+ web_message_template = models.TextField(null=True, default=None)
+ web_image_url_template = models.TextField(null=True, default=None)
+
+ email_title_template = models.TextField(null=True, default=None)
+ email_message_template = models.TextField(null=True, default=None)
+
+ telegram_title_template = models.TextField(null=True, default=None)
+ telegram_message_template = models.TextField(null=True, default=None)
+ telegram_image_url_template = models.TextField(null=True, default=None)
+
+ source_link_template = models.TextField(null=True, default=None)
+ grouping_id_template = models.TextField(null=True, default=None)
+ resolve_condition_template = models.TextField(null=True, default=None)
+ acknowledge_condition_template = models.TextField(null=True, default=None)
+
+ PUBLIC_TEMPLATES_FIELDS = {
+ "grouping_key": "grouping_id_template",
+ "resolve_signal": "resolve_condition_template",
+ "acknowledge_signal": "acknowledge_condition_template",
+ "slack": {
+ "title": "slack_title_template",
+ "message": "slack_message_template",
+ "image_url": "slack_image_url_template",
+ },
+ "web": {
+ "title": "web_title_template",
+ "message": "web_message_template",
+ "image_url": "web_image_url_template",
+ },
+ "sms": {
+ "title": "sms_title_template",
+ },
+ "phone_call": {
+ "title": "phone_call_title_template",
+ },
+ "email": {
+ "title": "email_title_template",
+ "message": "email_message_template",
+ },
+ "telegram": {
+ "title": "telegram_title_template",
+ "message": "telegram_message_template",
+ "image_url": "telegram_image_url_template",
+ },
+ }
+
+ # additional messaging backends templates
+    # e.g. {'<backend_id>': {'title': 'title template', 'message': 'message template', 'image_url': 'url template'}}
+ messaging_backends_templates = models.JSONField(null=True, default=None)
+
+ rate_limited_in_slack_at = models.DateTimeField(null=True, default=None)
+ rate_limit_message_task_id = models.CharField(max_length=100, null=True, default=None)
+
+ class Meta:
+ constraints = [
+ models.UniqueConstraint(
+ fields=["organization", "verbal_name", "deleted_at"],
+ name="unique integration name",
+ )
+ ]
+
+ def __str__(self):
+ short_name_with_emojis = emojize(self.short_name, use_aliases=True)
+ return f"{self.pk}: {short_name_with_emojis}"
+
+ def get_template_attribute(self, render_for, attr_name):
+ value = None
+ if self.messaging_backends_templates:
+ backend_id = render_for.upper()
+ value = self.messaging_backends_templates.get(backend_id, {}).get(attr_name)
+ return value
+
+ def get_default_template_attribute(self, render_for, attr_name):
+ defaults = {}
+ backend_id = render_for.upper()
+ # check backend exists
+ if get_messaging_backend_from_id(backend_id):
+ # fallback to web defaults for now
+ defaults = getattr(self, f"INTEGRATION_TO_DEFAULT_WEB_{attr_name.upper()}_TEMPLATE", {})
+ return defaults.get(self.integration)
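+    # A sketch of the two lookups above with a hypothetical backend id "EXAMPLE":
+    #   channel.messaging_backends_templates = {"EXAMPLE": {"title": "{{ payload.title }}"}}
+    #   channel.get_template_attribute("example", "title")    # -> "{{ payload.title }}"
+    #   channel.get_template_attribute("example", "message")  # -> None; callers may then
+    #   fall back to get_default_template_attribute, which reuses the web defaults for now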
+
+ @classmethod
+ def create(cls, **kwargs):
+ with transaction.atomic():
+ other_channels = cls.objects_with_deleted.select_for_update().filter(organization=kwargs["organization"])
+ channel = cls(**kwargs)
+ smile_code = number_to_smiles_translator(other_channels.count())
+ verbal_name = (
+ kwargs.get("verbal_name") or f"{dict(cls.INTEGRATION_CHOICES)[kwargs['integration']]}" f" {smile_code}"
+ )
+ channel.smile_code = smile_code
+ channel.verbal_name = verbal_name
+ channel.save()
+ return channel
+
+ def delete(self):
+ self.deleted_at = timezone.now()
+ self.save()
+
+ def hard_delete(self):
+ super(AlertReceiveChannel, self).delete()
+
+ def change_team(self, team_id, user):
+ EscalationPolicy = apps.get_model("alerts", "EscalationPolicy")
+
+ if team_id == self.team_id:
+ raise TeamCanNotBeChangedError("Integration is already in this team")
+
+ if team_id is not None:
+ new_team = user.teams.filter(public_primary_key=team_id).first()
+ if not new_team:
+ raise TeamCanNotBeChangedError("User is not a member of the selected team")
+ else:
+ new_team = None # means General team
+
+ escalation_chains_pks = self.channel_filters.all().values_list("escalation_chain", flat=True)
+ escalation_chains = self.organization.escalation_chains.filter(pk__in=escalation_chains_pks).annotate(
+ num_integrations=Count(
+ "channel_filters__alert_receive_channel",
+ distinct=True,
+ filter=Q(channel_filters__alert_receive_channel__deleted_at__isnull=True),
+ ),
+ )
+ if escalation_chains:
+ # check if escalation chains are connected to routes of other integrations
+ shared_escalation_chains = []
+ for escalation_chain in escalation_chains:
+ if escalation_chain.num_integrations > 1:
+ shared_escalation_chains.append(escalation_chain)
+ if shared_escalation_chains:
+ shared_escalation_chains_verbal = ", ".join([ec.name for ec in shared_escalation_chains])
+ raise TeamCanNotBeChangedError(
+ f"Team cannot be changed because one or more escalation chain of integration routes "
+ f"is connected to other integration: {shared_escalation_chains_verbal}"
+ )
+
+ escalation_policies = EscalationPolicy.objects.filter(escalation_chain__in=escalation_chains)
+
+ users_in_escalation = self.organization.users.filter(escalationpolicy__in=escalation_policies)
+ if new_team:
+ team_members = new_team.users.filter(pk__in=[user.pk for user in users_in_escalation])
+ else:
+ team_members = self.organization.users.filter(pk__in=[user.pk for user in users_in_escalation])
+ not_team_members = set(users_in_escalation) - set(team_members)
+ if not_team_members:
+ not_team_members_verbal = ", ".join([user.username for user in not_team_members])
+ raise TeamCanNotBeChangedError(
+ f"Team cannot be changed because one or more user from escalation chain(s) is not a member "
+ f"of the selected team: {not_team_members_verbal}"
+ )
+
+ escalation_chains.update(team=new_team)
+ self.team = new_team
+ self.save(update_fields=["team"])
+
+ @cached_property
+ def grafana_alerting_sync_manager(self):
+ return GrafanaAlertingSyncManager(self)
+
+ @property
+ def emojized_verbal_name(self):
+ return emoji.emojize(self.verbal_name, use_aliases=True)
+
+ @property
+ def new_incidents_web_link(self):
+ return urljoin(
+ self.organization.web_link, f"?page=incidents&integration={self.public_primary_key}&status=0&p=1"
+ )
+
+ @property
+ def is_rate_limited_in_slack(self):
+ return (
+ self.rate_limited_in_slack_at is not None
+ and self.rate_limited_in_slack_at + SLACK_RATE_LIMIT_TIMEOUT > timezone.now()
+ )
+
+ def start_send_rate_limit_message_task(self, delay=SLACK_RATE_LIMIT_DELAY):
+ task_id = celery_uuid()
+ self.rate_limit_message_task_id = task_id
+ self.rate_limited_in_slack_at = timezone.now()
+ self.save(update_fields=["rate_limit_message_task_id", "rate_limited_in_slack_at"])
+ post_slack_rate_limit_message.apply_async((self.pk,), countdown=delay, task_id=task_id)
+
+ @property
+ def repr_settings_for_client_side_logging(self):
+ """
+ Example of execution:
+ name: Grafana :blush:, team: example, auto resolve allowed: Yes
+ templates:
+ Slack title: *<{{ grafana_oncall_link }}|#{{ grafana_oncall_id }} Custom title>* via {{ integration_name }}
+ {% if source_link %}
+ (*<{{ source_link }}|source>*)
+ {%- endif %},
+ Slack message: default,
+ Slack image url: default,
+ SMS title: default,
+ Phone call title: default,
+ Web title: default,
+ Web message: default,
+ Web image url: default,
+ Email title: default,
+ Email message: default,
+ Telegram title: default,
+ Telegram message: default,
+ Telegram image url: default,
+ Source link: default,
+ Grouping id: default,
+ Resolve condition: default,
+ Acknowledge condition: default
+ """
+ result = f"name: {self.verbal_name}, team: {self.team.name if self.team else 'No team'}"
+ if self.is_able_to_autoresolve:
+ result += f", auto resolve allowed: {'Yes' if self.allow_source_based_resolving else 'No'}"
+ if self.integration == AlertReceiveChannel.INTEGRATION_SLACK_CHANNEL:
+ slack_channel = None
+ if self.integration_slack_channel_id:
+ SlackChannel = apps.get_model("slack", "SlackChannel")
+ slack_channel = SlackChannel.objects.filter(
+ slack_team_identity=self.organization.slack_team_identity,
+ slack_id=self.integration_slack_channel_id,
+ ).first()
+ result += f", slack channel: {slack_channel.name if slack_channel else 'not selected'}"
+ result += (
+ f"\ntemplates:\nSlack title: {self.slack_title_template or 'default'},\n"
+ f"Slack message: {self.slack_message_template or 'default'},\n"
+ f"Slack image url: {self.slack_image_url_template or 'default'},\n"
+ f"SMS title: {self.sms_title_template or 'default'},\n"
+ f"Phone call title: {self.phone_call_title_template or 'default'},\n"
+ f"Web title: {self.web_title_template or 'default'},\n"
+ f"Web message: {self.web_message_template or 'default'},\n"
+ f"Web image url: {self.web_image_url_template or 'default'},\n"
+ f"Email title: {self.email_title_template or 'default'},\n"
+ f"Email message: {self.email_message_template or 'default'},\n"
+ f"Telegram title: {self.telegram_title_template or 'default'},\n"
+ f"Telegram message: {self.telegram_message_template or 'default'},\n"
+ f"Telegram image url: {self.telegram_image_url_template or 'default'},\n"
+ f"Source link: {self.source_link_template or 'default'},\n"
+ f"Grouping id: {self.grouping_id_template or 'default'},\n"
+ f"Resolve condition: {self.resolve_condition_template or 'default'},\n"
+ f"Acknowledge condition: {self.acknowledge_condition_template or 'default'}"
+ )
+ return result
+
+ @property
+ def alert_groups_count(self):
+ return self.alert_groups.count()
+
+ @property
+ def alerts_count(self):
+ Alert = apps.get_model("alerts", "Alert")
+ return Alert.objects.filter(group__channel=self).count()
+
+ @property
+ def is_able_to_autoresolve(self):
+ return self.config.is_able_to_autoresolve
+
+ @property
+ def is_demo_alert_enabled(self):
+ return self.config.is_demo_alert_enabled
+
+ @property
+ def description(self):
+ if self.integration == AlertReceiveChannel.INTEGRATION_GRAFANA_ALERTING:
+ contact_points = self.contact_points.all()
+ rendered_description = Template(self.config.description).render(
+ is_finished_alerting_setup=self.is_finished_alerting_setup,
+ grafana_alerting_entities=[
+ {
+ "alertmanager_name": f"""
+ {'Grafana' if contact_point.datasource_name == 'grafana' else contact_point.datasource_name}
+ """,
+ "contact_point_url": f"/alerting/notifications/receivers/{self.emojized_verbal_name}/"
+ f"edit?alertmanager={contact_point.datasource_name}",
+ "routes_url": f"/alerting/routes?alertmanager={contact_point.datasource_name}",
+ }
+ for contact_point in contact_points
+ ],
+ )
+ else:
+ rendered_description = None
+ return rendered_description
+
+ @classmethod
+ def get_or_create_manual_integration(cls, defaults, **kwargs):
+ try:
+ alert_receive_channel = cls.objects.get(
+ organization=kwargs["organization"], integration=kwargs["integration"]
+ )
+ except cls.DoesNotExist:
+ kwargs.update(defaults)
+ alert_receive_channel = cls.create(**kwargs)
+ return alert_receive_channel
+
+ @property
+ def short_name(self):
+ if self.verbal_name is None:
+ return self.created_name + "" if self.deleted_at is None else "(Deleted)"
+ elif self.verbal_name == self.created_name:
+ return self.verbal_name
+ else:
+ return (
+ f"{self.verbal_name} - {self.get_integration_display()}"
+ f"{'' if self.deleted_at is None else '(Deleted)'}"
+ )
+
+ @property
+ def short_name_with_maintenance_status(self):
+ if self.maintenance_mode is not None:
+ return (
+ self.short_name + f" *[ on "
+ f"{AlertReceiveChannel.MAINTENANCE_MODE_CHOICES[self.maintenance_mode][1]}"
+ f" :construction: ]*"
+ )
+ else:
+ return self.short_name
+
+ @property
+ def created_name(self):
+ return f"{self.get_integration_display()} {self.smile_code}"
+
+ @property
+ def web_link(self):
+ return urljoin(self.organization.web_link, "?page=settings")
+
+ @property
+ def integration_url(self):
+ if self.integration in [
+ AlertReceiveChannel.INTEGRATION_MANUAL,
+ AlertReceiveChannel.INTEGRATION_SLACK_CHANNEL,
+ AlertReceiveChannel.INTEGRATION_INBOUND_EMAIL,
+ AlertReceiveChannel.INTEGRATION_MAINTENANCE,
+ ]:
+ return None
+ return urljoin(
+ settings.BASE_URL,
+ f"/integrations/v1/{self.config.slug}/{self.token}/",
+ )
+
+ @property
+ def inbound_email(self):
+ return f"{self.token}@{live_settings.SENDGRID_INBOUND_EMAIL_DOMAIN}"
+
+ @property
+ def default_channel_filter(self):
+ return self.channel_filters.filter(is_default=True).first()
+
+ # Templating
+ @property
+ def templates(self):
+ return {
+ "grouping_key": self.grouping_id_template,
+ "resolve_signal": self.resolve_condition_template,
+ "acknowledge_signal": self.acknowledge_condition_template,
+ "slack": {
+ "title": self.slack_title_template,
+ "message": self.slack_message_template,
+ "image_url": self.slack_image_url_template,
+ },
+ "web": {
+ "title": self.web_title_template,
+ "message": self.web_message_template,
+ "image_url": self.web_image_url_template,
+ },
+ "email": {
+ "title": self.email_title_template,
+ "message": self.email_message_template,
+ },
+ "sms": {
+ "title": self.sms_title_template,
+ },
+ "phone_call": {
+ "title": self.phone_call_title_template,
+ },
+ "telegram": {
+ "title": self.telegram_title_template,
+ "message": self.telegram_message_template,
+ "image_url": self.telegram_image_url_template,
+ },
+ }
+
+ @property
+ def is_available_for_custom_templates(self):
+ return True
+
+ # Maintenance
+ def start_disable_maintenance_task(self, countdown):
+ maintenance_uuid = disable_maintenance.apply_async(
+ args=(),
+ kwargs={
+ "alert_receive_channel_id": self.pk,
+ },
+ countdown=countdown,
+ )
+ return maintenance_uuid
+
+ def get_organization(self):
+ return self.organization
+
+ def get_team(self):
+ return self.team
+
+ def get_verbal(self):
+ return self.verbal_name
+
+ def force_disable_maintenance(self, user):
+ disable_maintenance(alert_receive_channel_id=self.pk, force=True, user_id=user.pk)
+
+ def notify_about_maintenance_action(self, text, send_to_general_log_channel=True):
+ channel_ids = list(
+ self.channel_filters.filter(slack_channel_id__isnull=False, notify_in_slack=False).values_list(
+ "slack_channel_id", flat=True
+ )
+ )
+
+ if send_to_general_log_channel:
+ general_log_channel_id = self.organization.general_log_channel_id
+ if general_log_channel_id is not None:
+ channel_ids.append(general_log_channel_id)
+ unique_channels_id = set(channel_ids)
+ for channel_id in unique_channels_id:
+ post_message_to_channel(self.organization, channel_id, text)
+
+ # Heartbeat
+ @property
+ def is_available_for_integration_heartbeat(self):
+ return self.heartbeat_module is not None
+
+ @property
+ def heartbeat_restored_title(self):
+ return getattr(self.heartbeat_module, "heartbeat_restored_title")
+
+ @property
+ def heartbeat_restored_message(self):
+ return getattr(self.heartbeat_module, "heartbeat_restored_message")
+
+ @property
+ def heartbeat_restored_payload(self):
+ return getattr(self.heartbeat_module, "heartbeat_restored_payload")
+
+ @property
+ def heartbeat_expired_title(self):
+ return getattr(self.heartbeat_module, "heartbeat_expired_title")
+
+ @property
+ def heartbeat_expired_message(self):
+ return getattr(self.heartbeat_module, "heartbeat_expired_message")
+
+ @property
+ def heartbeat_expired_payload(self):
+ return getattr(self.heartbeat_module, "heartbeat_expired_payload")
+
+ @property
+ def heartbeat_instruction_template(self):
+ return getattr(self.heartbeat_module, "heartbeat_instruction_template")
+
+ @property
+ def heartbeat_module(self):
+ return getattr(heartbeat, self.INTEGRATIONS_TO_REVERSE_URL_MAP[self.integration], None)
+
+ # Demo alerts
+ def send_demo_alert(self, force_route_id=None):
+ logger.info(f"send_demo_alert integration={self.pk} force_route_id={force_route_id}")
+ if self.is_demo_alert_enabled:
+ if self.has_alertmanager_payload_structure:
+ for alert in self.config.example_payload.get("alerts", []):
+ create_alertmanager_alerts.apply_async(
+ [],
+ {
+ "alert_receive_channel_pk": self.pk,
+ "alert": alert,
+ "is_demo": True,
+ "force_route_id": force_route_id,
+ },
+ )
+ else:
+ create_alert.apply_async(
+ [],
+ {
+ "title": "Demo alert",
+ "message": "Demo alert",
+ "image_url": None,
+ "link_to_upstream_details": None,
+ "alert_receive_channel_pk": self.pk,
+ "integration_unique_data": None,
+ "raw_request_data": self.config.example_payload,
+ "is_demo": True,
+ "force_route_id": force_route_id,
+ },
+ )
+ else:
+ raise UnableToSendDemoAlert("Unable to send demo alert for this integration")
+
+ @property
+ def has_alertmanager_payload_structure(self):
+ return self.integration in (
+ AlertReceiveChannel.INTEGRATION_ALERTMANAGER,
+ AlertReceiveChannel.INTEGRATION_GRAFANA,
+ AlertReceiveChannel.INTEGRATION_GRAFANA_ALERTING,
+ )
+
+
+@receiver(post_save, sender=AlertReceiveChannel)
+def listen_for_alertreceivechannel_model_save(sender, instance, created, *args, **kwargs):
+ ChannelFilter = apps.get_model("alerts", "ChannelFilter")
+ IntegrationHeartBeat = apps.get_model("heartbeat", "IntegrationHeartBeat")
+
+ if created:
+ description = f"New integration {instance.verbal_name} was created"
+ create_organization_log(
+ instance.organization,
+ instance.author,
+ type=OrganizationLogType.TYPE_INTEGRATION_CREATED,
+ description=description,
+ )
+ default_filter = ChannelFilter(alert_receive_channel=instance, filtering_term=None, is_default=True)
+ default_filter.save()
+ filter_verbal = default_filter.verbal_name_for_clients.capitalize()
+ description = f"{filter_verbal} was created for integration {instance.verbal_name}"
+ create_organization_log(
+ instance.organization,
+ None,
+ OrganizationLogType.TYPE_CHANNEL_FILTER_CREATED,
+ description,
+ )
+        TEN_MINUTES = 600  # this is the timeout for cloud heartbeats
+ if instance.is_available_for_integration_heartbeat:
+ IntegrationHeartBeat.objects.create(alert_receive_channel=instance, timeout_seconds=TEN_MINUTES)
+ description = f"Heartbeat for integration {instance.verbal_name} was created"
+ create_organization_log(
+ instance.organization, None, OrganizationLogType.TYPE_HEARTBEAT_CREATED, description
+ )
+ else:
+ logger.info(f"Drop AG cache. Reason: save alert_receive_channel {instance.pk}")
+        update_fields = kwargs.get("update_fields")
+        # Hack to avoid invalidating the web cache on AlertReceiveChannel.start_send_rate_limit_message_task
+        if update_fields and "rate_limit_message_task_id" in update_fields:
+            return
+
+ invalidate_web_cache_for_alert_group.apply_async(kwargs={"channel_pk": instance.pk})
+
+ if instance.integration == AlertReceiveChannel.INTEGRATION_GRAFANA_ALERTING:
+ if created:
+ instance.grafana_alerting_sync_manager.create_contact_points()
+ # do not trigger sync contact points if field "is_finished_alerting_setup" was updated
+ elif (
+ kwargs is None
+ or not kwargs.get("update_fields")
+ or "is_finished_alerting_setup" not in kwargs["update_fields"]
+ ):
+ sync_grafana_alerting_contact_points.apply_async((instance.pk,), countdown=5)
diff --git a/engine/apps/alerts/models/channel_filter.py b/engine/apps/alerts/models/channel_filter.py
new file mode 100644
index 0000000000..b1f1dae2bc
--- /dev/null
+++ b/engine/apps/alerts/models/channel_filter.py
@@ -0,0 +1,186 @@
+import json
+import logging
+import re
+
+from django.apps import apps
+from django.conf import settings
+from django.core.validators import MinLengthValidator
+from django.db import models
+from ordered_model.models import OrderedModel
+
+from common.public_primary_keys import generate_public_primary_key, increase_public_primary_key_length
+
+logger = logging.getLogger(__name__)
+
+
+def generate_public_primary_key_for_channel_filter():
+ prefix = "R"
+ new_public_primary_key = generate_public_primary_key(prefix)
+
+ failure_counter = 0
+ while ChannelFilter.objects.filter(public_primary_key=new_public_primary_key).exists():
+ new_public_primary_key = increase_public_primary_key_length(
+ failure_counter=failure_counter, prefix=prefix, model_name="ChannelFilter"
+ )
+ failure_counter += 1
+
+ return new_public_primary_key
+
+
+class ChannelFilter(OrderedModel):
+ """
+    This is now effectively a router matching on filtering terms, not a filter.
+ """
+
+ order_with_respect_to = ("alert_receive_channel", "is_default")
+
+ public_primary_key = models.CharField(
+ max_length=20,
+ validators=[MinLengthValidator(settings.PUBLIC_PRIMARY_KEY_MIN_LENGTH + 1)],
+ unique=True,
+ default=generate_public_primary_key_for_channel_filter,
+ )
+
+ alert_receive_channel = models.ForeignKey(
+ "alerts.AlertReceiveChannel", on_delete=models.CASCADE, related_name="channel_filters"
+ )
+
+ escalation_chain = models.ForeignKey(
+ "alerts.EscalationChain", null=True, default=None, on_delete=models.SET_NULL, related_name="channel_filters"
+ )
+
+ notify_in_slack = models.BooleanField(null=True, default=True)
+ notify_in_telegram = models.BooleanField(null=True, default=False)
+
+ slack_channel_id = models.CharField(max_length=100, null=True, default=None)
+
+ telegram_channel = models.ForeignKey(
+ "telegram.TelegramToOrganizationConnector",
+ on_delete=models.SET_NULL,
+ null=True,
+ default=None,
+ related_name="channel_filter",
+ )
+
+ # track additional messaging backends config
+    # e.g. {'<backend_id>': {'channel': '<channel_id>', 'enabled': True}}
+ notification_backends = models.JSONField(null=True, default=None)
+
+ created_at = models.DateTimeField(auto_now_add=True)
+ filtering_term = models.CharField(max_length=1024, null=True, default=None)
+ is_default = models.BooleanField(default=False)
+
+ class Meta:
+ ordering = (
+ "alert_receive_channel",
+ "is_default",
+ "order",
+ )
+
+ def __str__(self):
+ return f"{self.pk}: {self.filtering_term or 'default'}"
+
+ @classmethod
+ def select_filter(cls, alert_receive_channel, raw_request_data, title, message=None, force_route_id=None):
+ # Try to find force route first if force_route_id is given
+ if force_route_id is not None:
+ logger.info(
+ f"start select_filter with force_route_id={force_route_id} alert_receive_channel={alert_receive_channel.pk}."
+ )
+ try:
+ satisfied_filter = cls.objects.get(
+ alert_receive_channel=alert_receive_channel.pk,
+ pk=force_route_id,
+ )
+ logger.info(
+ f"success select_filter with force_route_id={force_route_id} alert_receive_channel={alert_receive_channel.pk}."
+ )
+ return satisfied_filter
+ except cls.DoesNotExist:
+                # If the force route was not found, fall back to default routing.
+ logger.info(
+ f"select_filter unable to find force_route_id={force_route_id} alert_receive_channel={alert_receive_channel.pk}."
+ )
+
+ filters = cls.objects.filter(alert_receive_channel=alert_receive_channel)
+
+ satisfied_filter = None
+ for _filter in filters:
+ if satisfied_filter is None and _filter.is_satisfying(raw_request_data, title, message):
+ satisfied_filter = _filter
+
+ return satisfied_filter
+
+ def is_satisfying(self, raw_request_data, title, message=None):
+ AlertReceiveChannel = apps.get_model("alerts", "AlertReceiveChannel")
+
+ return (
+ self.is_default
+ or self.check_filter(json.dumps(raw_request_data))
+ or self.check_filter(str(title))
+ or
+ # Special case for Amazon SNS
+ (
+ self.check_filter(str(message))
+ if self.alert_receive_channel.integration == AlertReceiveChannel.INTEGRATION_AMAZON_SNS
+ else False
+ )
+ )
+
+ def check_filter(self, value):
+ return re.search(self.filtering_term, value)
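+    # A sketch of the matching above with a hypothetical route (assumes a configured
+    # Django environment):
+    #   route = ChannelFilter(filtering_term="us-(east|west)", is_default=False)
+    #   route.is_satisfying({"region": "us-east-1"}, title="CPU high")  # truthy: the
+    #   term matches the JSON-dumped payload even though the title does not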
+
+ @property
+ def slack_channel_id_or_general_log_id(self):
+ organization = self.alert_receive_channel.organization
+ slack_team_identity = organization.slack_team_identity
+ if slack_team_identity is None:
+ return None
+ if self.slack_channel_id is None:
+ return organization.general_log_channel_id
+ else:
+ return self.slack_channel_id
+
+ @property
+ def repr_settings_for_client_side_logging(self):
+ """
+ Example of execution:
+ term: .*, order: 0, slack notification allowed: Yes, telegram notification allowed: Yes,
+ slack channel: without_amixr_general_channel, telegram channel: default
+ """
+ result = (
+ f"term: {self.str_for_clients}, order: {self.order}, slack notification allowed: "
+ f"{'Yes' if self.notify_in_slack else 'No'}, telegram notification allowed: "
+ f"{'Yes' if self.notify_in_telegram else 'No'}"
+ )
+ if self.notification_backends:
+ for backend_id, backend in self.notification_backends.items():
+ result += f", {backend_id} notification allowed: {'Yes' if backend.get('enabled') else 'No'}"
+ slack_channel = None
+ if self.slack_channel_id:
+ SlackChannel = apps.get_model("slack", "SlackChannel")
+ sti = self.alert_receive_channel.organization.slack_team_identity
+ slack_channel = SlackChannel.objects.filter(slack_team_identity=sti, slack_id=self.slack_channel_id).first()
+ result += f", slack channel: {slack_channel.name if slack_channel else 'default'}"
+ result += f", telegram channel: {self.telegram_channel.channel_name if self.telegram_channel else 'default'}"
+ if self.notification_backends:
+ for backend_id, backend in self.notification_backends.items():
+ channel = backend.get("channel_id") or "default"
+ result += f", {backend_id} channel: {channel}"
+ result += f", escalation chain: {self.escalation_chain.name if self.escalation_chain else 'not selected'}"
+ return result
+
+ @property
+ def str_for_clients(self):
+ if self.filtering_term is None:
+ return "default"
+ return str(self.filtering_term).replace("`", "")
+
+ @property
+ def verbal_name_for_clients(self):
+ return "default route" if self.is_default else f"route `{self.str_for_clients}`"
+
+ def send_demo_alert(self):
+ integration = self.alert_receive_channel
+ integration.send_demo_alert(force_route_id=self.pk)
diff --git a/engine/apps/alerts/models/custom_button.py b/engine/apps/alerts/models/custom_button.py
new file mode 100644
index 0000000000..9007c34f9c
--- /dev/null
+++ b/engine/apps/alerts/models/custom_button.py
@@ -0,0 +1,184 @@
+import json
+import logging
+import re
+
+from django.conf import settings
+from django.core.validators import MinLengthValidator
+from django.db import models
+from django.db.models import F, Value
+from django.db.models.functions import Concat
+from django.utils import timezone
+from jinja2 import Template
+from requests.auth import HTTPBasicAuth
+
+from common.public_primary_keys import generate_public_primary_key, increase_public_primary_key_length
+
+logger = logging.getLogger(__name__)
+logger.setLevel(logging.DEBUG)
+
+
+def generate_public_primary_key_for_custom_button():
+ prefix = "K"
+ new_public_primary_key = generate_public_primary_key(prefix)
+
+ failure_counter = 0
+ while CustomButton.objects.filter(public_primary_key=new_public_primary_key).exists():
+ new_public_primary_key = increase_public_primary_key_length(
+ failure_counter=failure_counter, prefix=prefix, model_name="CustomButton"
+ )
+ failure_counter += 1
+
+ return new_public_primary_key
+
+
+class CustomButtonQueryset(models.QuerySet):
+ def delete(self):
+ self.update(deleted_at=timezone.now(), name=F("name") + "_deleted_" + F("public_primary_key"))
+
+
+class CustomButtonManager(models.Manager):
+ def get_queryset(self):
+ return CustomButtonQueryset(self.model, using=self._db).filter(deleted_at=None)
+
+ def hard_delete(self):
+ return self.get_queryset().hard_delete()
+
+
+class CustomButton(models.Model):
+
+ objects = CustomButtonManager()
+ objects_with_deleted = models.Manager()
+
+ public_primary_key = models.CharField(
+ max_length=20,
+ validators=[MinLengthValidator(settings.PUBLIC_PRIMARY_KEY_MIN_LENGTH + 1)],
+ unique=True,
+ default=generate_public_primary_key_for_custom_button,
+ )
+
+ organization = models.ForeignKey(
+ "user_management.Organization", on_delete=models.CASCADE, related_name="custom_buttons"
+ )
+ team = models.ForeignKey(
+ "user_management.Team",
+ on_delete=models.SET_NULL,
+ related_name="custom_buttons",
+ null=True,
+ default=None,
+ )
+ created_at = models.DateTimeField(auto_now_add=True)
+ name = models.CharField(max_length=100)
+ webhook = models.CharField(max_length=1000, null=True, default=None)
+ data = models.TextField(null=True, default=None)
+ user = models.CharField(max_length=100, null=True, default=None)
+ password = models.CharField(max_length=100, null=True, default=None)
+ deleted_at = models.DateTimeField(blank=True, null=True)
+ authorization_header = models.CharField(max_length=1000, null=True, default=None)
+ forward_whole_payload = models.BooleanField(default=False)
+
+ class Meta:
+ unique_together = ("name", "organization")
+
+ def __str__(self):
+ return str(self.name)
+
+ def delete(self):
+ logger.info(f"Soft delete of custom button {self}")
+ self.escalation_policies.all().delete()
+ self.deleted_at = timezone.now()
+        # 100 - 22 = 78: 100 is the max length of the name field and 22 is the length of the
+        # "_deleted_<public_primary_key>" suffix. A name of maximum length therefore has to be
+        # trimmed to 78 chars so the suffix still fits.
+ self.name = f"{self.name[:78]}_deleted_{self.public_primary_key}"
+ self.save()
+
+ def hard_delete(self):
+ super().delete()
+
+ @property
+ def repr_settings_for_client_side_logging(self):
+ """
+ Example of execution:
+ name: example, team: example, webhook: https://example.com, user: None, password: None,
+ authorization header: None, data: None
+ """
+ return (
+ f"name: {self.name}, team: {self.team.name if self.team else 'No team'}, webhook: {self.webhook}, "
+ f"user: {self.user}, password: {self.password}, authorization header: {self.authorization_header}, "
+ f"data: {self.data}, forward_whole_payload {self.forward_whole_payload}"
+ )
+
+ def build_post_kwargs(self, alert):
+ post_kwargs = {}
+ if self.user and self.password:
+ post_kwargs["auth"] = HTTPBasicAuth(self.user, self.password)
+ if self.authorization_header:
+ post_kwargs["headers"] = {"Authorization": self.authorization_header}
+ if self.forward_whole_payload:
+ post_kwargs["json"] = alert.raw_request_data
+ elif self.data:
+ rendered_data = Template(self.data).render(
+ {
+ "alert_title": self._escape_string(alert.title),
+ "alert_message": self._escape_string(alert.message),
+ "alert_url": alert.link_to_upstream_details,
+ "alert_payload": self._escape_alert_payload(alert.raw_request_data),
+ "alert_payload_json": json.dumps(alert.raw_request_data),
+ }
+ )
+ post_kwargs["json"] = json.loads(rendered_data)
+ return post_kwargs
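+    # Hypothetical usage: with data = '{"text": "{{ alert_title }}"}', an alert titled
+    # 'Disk "sda" full' renders to {"json": {"text": 'Disk "sda" full'}}, which the
+    # caller can presumably pass along as requests.post(self.webhook, **post_kwargs).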
+
+ def _escape_alert_payload(self, payload: dict):
+ if isinstance(payload, dict):
+ escaped_data = EscapeDoubleQuotesDict()
+ for key in payload.keys():
+ escaped_data[key] = self._escape_alert_payload(payload[key])
+ elif isinstance(payload, list):
+ escaped_data = []
+ for value in payload:
+ escaped_data.append(self._escape_alert_payload(value))
+ elif isinstance(payload, str):
+ escaped_data = self._escape_string(payload)
+ else:
+ escaped_data = payload
+ return escaped_data
+
+ def _escape_string(self, string: str):
+ """
+        Escapes a string so it can be embedded in a value passed to json.loads().
+        json.dumps is the simplest way to escape all special characters in a string.
+        The first and last chars are the surrounding quotes added by json.dumps(); we strip them and keep only the escaping.
+ """
+ return json.dumps(string)[1:-1]
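+        # e.g. _escape_string('say "hi"\n') returns 'say \\"hi\\"\\n': quotes and newlines
+        # are escaped so the value can be embedded inside a JSON template string.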
+
+
+class EscapeDoubleQuotesDict(dict):
+ """
+ Warning: Please, do not use this dict anywhere except CustomButton._escape_alert_payload.
+    This custom dict escapes double quotes to produce a string that is safe to pass to json.loads().
+    It fixes the case where CustomButton.build_post_kwargs fails on payloads containing a string with a single quote:
+    for such values the built-in dict's str method surrounds them with double quotes.
+
+ For example:
+
+ alert_payload = {
+ "text": "Hi, it's alert",
+ }
+ template = '{"data" : "{{ alert_payload }}"}'
+ rendered = '{"data" : "{\'text\': "Hi, it\'s alert"}"}'
+ # and json.loads(rendered) will fail due to unescaped double quotes
+
+ # Now with EscapeDoubleQuotesDict.
+
+ alert_payload = EscapeDoubleQuotesDict({
+ "text": "Hi, it's alert",
+ })
+ rendered = '{"data" : "{\'text\': \\"Hi, it\'s alert\\"}"}'
+ # and json.loads(rendered) works.
+ """
+
+ def __str__(self):
+ original_str = super().__str__()
+ if '"' in original_str:
+            return re.sub('(?<!\\\\)"', '\\\\"', original_str)
+        return original_str
diff --git a/engine/apps/alerts/models/escalation_policy.py b/engine/apps/alerts/models/escalation_policy.py
new file mode 100644
--- /dev/null
+++ b/engine/apps/alerts/models/escalation_policy.py
+        (STEP_NOTIFY_IF_NUM_ALERTS_IN_TIME_WINDOW, "Continue escalation if >X alerts per Y minutes"),
+ )
+
+    # Ordered step choices available for the internal api.
+    # Important steps are not listed here because they are presented as the default step plus an important flag
+ INTERNAL_API_STEPS = [
+ # Common
+ STEP_WAIT,
+ STEP_NOTIFY_MULTIPLE_USERS,
+ STEP_NOTIFY_SCHEDULE,
+ STEP_FINAL_RESOLVE,
+ # Slack
+ STEP_FINAL_NOTIFYALL,
+ STEP_NOTIFY_GROUP,
+ # Other
+ STEP_TRIGGER_CUSTOM_BUTTON,
+ STEP_NOTIFY_USERS_QUEUE,
+ STEP_NOTIFY_IF_TIME,
+ STEP_NOTIFY_IF_NUM_ALERTS_IN_TIME_WINDOW,
+ STEP_REPEAT_ESCALATION_N_TIMES,
+ ]
+    # Steps that can be stored in the db while interacting with the internal api.
+    # Includes the important versions of the default steps
+ INTERNAL_DB_STEPS = [
+ STEP_WAIT,
+ STEP_FINAL_NOTIFYALL,
+ STEP_FINAL_RESOLVE,
+ STEP_NOTIFY_GROUP,
+ STEP_NOTIFY_SCHEDULE,
+ STEP_NOTIFY_GROUP_IMPORTANT,
+ STEP_NOTIFY_SCHEDULE_IMPORTANT,
+ STEP_NOTIFY_USERS_QUEUE,
+ STEP_NOTIFY_IF_TIME,
+ STEP_NOTIFY_IF_NUM_ALERTS_IN_TIME_WINDOW,
+ STEP_NOTIFY_MULTIPLE_USERS,
+ STEP_NOTIFY_MULTIPLE_USERS_IMPORTANT,
+ STEP_TRIGGER_CUSTOM_BUTTON,
+ STEP_REPEAT_ESCALATION_N_TIMES,
+ ]
+
+    # Maps the internal api's step choices to their verbal names. The first string in the tuple is the display name
+    # for an existing step; the second one is for the option in the dropdown.
+ INTERNAL_API_STEPS_TO_VERBAL_MAP = {
+ # Common steps
+ STEP_WAIT: ("Wait {{wait_delay}} minute(s)", "Wait"),
+ STEP_NOTIFY_MULTIPLE_USERS: ("Start {{importance}} notification for {{users}}", "Notify users"),
+ STEP_NOTIFY_SCHEDULE: (
+ "Start {{importance}} notification for schedule {{schedule}}",
+ "Notify users from on-call schedule",
+ ),
+ STEP_FINAL_RESOLVE: ("Resolve incident automatically", "Resolve incident automatically"),
+ # Slack
+ STEP_FINAL_NOTIFYALL: ("Notify whole Slack channel", "Notify whole Slack channel"),
+ STEP_NOTIFY_GROUP: (
+ "Start {{importance}} notification for everyone from Slack User Group {{slack_user_group}}",
+ "Notify Slack User Group",
+ ),
+ # Other
+ STEP_TRIGGER_CUSTOM_BUTTON: ("Trigger outgoing webhook {{custom_action}}", "Trigger outgoing webhook"),
+ STEP_NOTIFY_USERS_QUEUE: ("Round robin notification for {{users}}", "Notify users one by one (round-robin)"),
+ STEP_NOTIFY_IF_TIME: (
+ "Continue escalation if current time is in {{timerange}} ",
+ "Continue escalation if current time is in range",
+ ),
+ STEP_NOTIFY_IF_NUM_ALERTS_IN_TIME_WINDOW: (
+ "Continue escalation if >{{num_alerts_in_window}} alerts per {{num_minutes_in_window}} minutes",
+ "Continue escalation if >X alerts per Y minutes",
+ ),
+ STEP_REPEAT_ESCALATION_N_TIMES: (
+ "Repeat escalation from the beginning (5 times max)",
+ "Repeat escalations from the beginning (5 times max)",
+ ),
+ }
+
+ STEPS_WITH_NO_IMPORTANT_VERSION_SET = {
+ STEP_WAIT,
+ STEP_FINAL_NOTIFYALL,
+ STEP_FINAL_RESOLVE,
+ STEP_TRIGGER_CUSTOM_BUTTON,
+ STEP_NOTIFY_USERS_QUEUE,
+ STEP_NOTIFY_IF_TIME,
+ STEP_REPEAT_ESCALATION_N_TIMES,
+ }
+
+ DEFAULT_TO_IMPORTANT_STEP_MAPPING = {
+ STEP_NOTIFY_GROUP: STEP_NOTIFY_GROUP_IMPORTANT,
+ STEP_NOTIFY_SCHEDULE: STEP_NOTIFY_SCHEDULE_IMPORTANT,
+ STEP_NOTIFY_MULTIPLE_USERS: STEP_NOTIFY_MULTIPLE_USERS_IMPORTANT,
+ }
+ IMPORTANT_TO_DEFAULT_STEP_MAPPING = {
+ STEP_NOTIFY_GROUP_IMPORTANT: STEP_NOTIFY_GROUP,
+ STEP_NOTIFY_SCHEDULE_IMPORTANT: STEP_NOTIFY_SCHEDULE,
+ STEP_NOTIFY_MULTIPLE_USERS_IMPORTANT: STEP_NOTIFY_MULTIPLE_USERS,
+ }
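+    # e.g. a "Notify Group" step saved with the important flag is stored as
+    # STEP_NOTIFY_GROUP_IMPORTANT, and the reverse map recovers STEP_NOTIFY_GROUP
+    # so it can be presented as the default step plus an important flag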
+
+    # Default steps are just the non-important versions of the important steps, e.g. notify group vs. notify group important
+ DEFAULT_STEPS_SET = {
+ STEP_NOTIFY_GROUP,
+ STEP_NOTIFY_SCHEDULE,
+ STEP_NOTIFY_MULTIPLE_USERS,
+ }
+
+ IMPORTANT_STEPS_SET = {
+ STEP_NOTIFY_GROUP_IMPORTANT,
+ STEP_NOTIFY_SCHEDULE_IMPORTANT,
+ STEP_NOTIFY_MULTIPLE_USERS_IMPORTANT,
+ }
+
+ SLACK_INTEGRATION_REQUIRED_STEPS = [
+ STEP_NOTIFY_GROUP,
+ STEP_NOTIFY_GROUP_IMPORTANT,
+ STEP_FINAL_NOTIFYALL,
+ ]
+
+ PUBLIC_STEP_CHOICES = [
+ STEP_WAIT,
+ STEP_NOTIFY_SCHEDULE,
+ STEP_NOTIFY_MULTIPLE_USERS,
+ STEP_NOTIFY_USERS_QUEUE,
+ STEP_NOTIFY_GROUP,
+ STEP_FINAL_RESOLVE,
+ STEP_FINAL_NOTIFYALL,
+ STEP_TRIGGER_CUSTOM_BUTTON,
+ STEP_NOTIFY_IF_TIME,
+ STEP_NOTIFY_IF_NUM_ALERTS_IN_TIME_WINDOW,
+ STEP_REPEAT_ESCALATION_N_TIMES,
+ ]
+
+ PUBLIC_STEP_CHOICES_MAP = {
+ STEP_WAIT: "wait",
+ STEP_NOTIFY: "notify_one_person",
+ STEP_FINAL_NOTIFYALL: "notify_whole_channel",
+ STEP_FINAL_RESOLVE: "resolve",
+ STEP_NOTIFY_GROUP: "notify_user_group",
+ STEP_NOTIFY_GROUP_IMPORTANT: "notify_user_group",
+ STEP_NOTIFY_IMPORTANT: "notify_one_person",
+ STEP_NOTIFY_SCHEDULE: "notify_on_call_from_schedule",
+ STEP_NOTIFY_SCHEDULE_IMPORTANT: "notify_on_call_from_schedule",
+ STEP_TRIGGER_CUSTOM_BUTTON: "trigger_action",
+ STEP_NOTIFY_USERS_QUEUE: "notify_person_next_each_time",
+ STEP_NOTIFY_MULTIPLE_USERS: "notify_persons",
+ STEP_NOTIFY_MULTIPLE_USERS_IMPORTANT: "notify_persons",
+ STEP_NOTIFY_IF_TIME: "notify_if_time_from_to",
+ STEP_NOTIFY_IF_NUM_ALERTS_IN_TIME_WINDOW: "notify_if_num_alerts_in_window",
+ STEP_REPEAT_ESCALATION_N_TIMES: "repeat_escalation",
+ }
+
+ public_primary_key = models.CharField(
+ max_length=20,
+ validators=[MinLengthValidator(settings.PUBLIC_PRIMARY_KEY_MIN_LENGTH + 1)],
+ unique=True,
+ default=generate_public_primary_key_for_escalation_policy,
+ )
+
+ escalation_chain = models.ForeignKey(
+ "alerts.EscalationChain", on_delete=models.CASCADE, related_name="escalation_policies"
+ )
+
+ notify_to_users_queue = models.ManyToManyField("user_management.User")
+
+ last_notified_user = models.ForeignKey(
+ "user_management.User",
+ on_delete=models.SET_NULL,
+ related_name="last_notified_in_escalation_policies",
+ default=None,
+ null=True,
+ )
+
+ step = models.IntegerField(choices=STEP_CHOICES, default=None, null=True)
+
+ notify_to_group = models.ForeignKey(
+ "slack.SlackUserGroup",
+ on_delete=models.SET_NULL,
+ default=None,
+ null=True,
+ )
+
+ notify_schedule = models.ForeignKey(
+ "schedules.OnCallSchedule",
+ on_delete=models.SET_NULL,
+ related_name="escalation_policies",
+ null=True,
+ default=None,
+ )
+
+ custom_button_trigger = models.ForeignKey(
+ "alerts.CustomButton",
+ on_delete=models.CASCADE,
+ related_name="escalation_policies",
+ default=None,
+ null=True,
+ )
+
+ ONE_MINUTE = timezone.timedelta(minutes=1)
+ FIVE_MINUTES = timezone.timedelta(minutes=5)
+ FIFTEEN_MINUTES = timezone.timedelta(minutes=15)
+ THIRTY_MINUTES = timezone.timedelta(minutes=30)
+ HOUR = timezone.timedelta(minutes=60)
+
+ DEFAULT_WAIT_DELAY = timezone.timedelta(minutes=5)
+
+ DURATION_CHOICES = (
+ (ONE_MINUTE, "1 min"),
+ (FIVE_MINUTES, "5 min"),
+ (FIFTEEN_MINUTES, "15 min"),
+ (THIRTY_MINUTES, "30 min"),
+ (HOUR, "60 min"),
+ )
+
+ WEB_DURATION_CHOICES = (
+ (ONE_MINUTE, "1"),
+ (FIVE_MINUTES, "5"),
+ (FIFTEEN_MINUTES, "15"),
+ (THIRTY_MINUTES, "30"),
+ (HOUR, "60"),
+ )
+
+    # the same choices as WEB_DURATION_CHOICES, but as integer minutes instead of timedelta
+ WEB_DURATION_CHOICES_MINUTES = [(choice[0].seconds // 60, choice[1]) for choice in WEB_DURATION_CHOICES]
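+    # e.g. [(1, "1"), (5, "5"), (15, "15"), (30, "30"), (60, "60")]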
+
+ wait_delay = models.DurationField(default=None, null=True, choices=DURATION_CHOICES)
+
+ from_time = models.TimeField(null=True, default=None)
+ to_time = models.TimeField(null=True, default=None)
+
+ # fields needed for escalation step "Continue escalation if >X alerts per Y minutes"
+ num_alerts_in_window = models.PositiveIntegerField(null=True, default=None)
+ num_minutes_in_window = models.PositiveIntegerField(null=True, default=None)
+
+ def __str__(self):
+ return f"{self.pk}: {self.step_type_verbal}"
+
+ @property
+ def step_type_verbal(self):
+ return self.STEP_CHOICES[self.step][1] if self.step is not None else "Empty"
+
+ @property
+ def repr_settings_for_client_side_logging(self):
+ """
+        Examples of the rendered representation:
+        step: 'Notify multiple Users', order: 0, important: No, users: Alex, Bob
+        step: 'Continue escalation only if time is from', order: 4, from time: 09:40:00 (UTC), to time: 15:40:00 (UTC)
+ """
+ result = f"step: '{self.step_type_verbal}', order: {self.order}"
+ if self.step not in EscalationPolicy.STEPS_WITH_NO_IMPORTANT_VERSION_SET:
+ result += f", important: {'Yes' if self.step in EscalationPolicy.IMPORTANT_STEPS_SET else 'No'}"
+ if self.step == EscalationPolicy.STEP_WAIT:
+ result += f", wait: {self.get_wait_delay_display() if self.wait_delay else 'default'}"
+ elif self.step in [EscalationPolicy.STEP_NOTIFY_GROUP, EscalationPolicy.STEP_NOTIFY_GROUP_IMPORTANT]:
+ result += f", user group: {self.notify_to_group.name if self.notify_to_group else 'not selected'}"
+ elif self.step in [EscalationPolicy.STEP_NOTIFY_SCHEDULE, EscalationPolicy.STEP_NOTIFY_SCHEDULE_IMPORTANT]:
+ result += f", on-call schedule: {self.notify_schedule.name if self.notify_schedule else 'not selected'}"
+ elif self.step == EscalationPolicy.STEP_TRIGGER_CUSTOM_BUTTON:
+ result += f", action: {self.custom_button_trigger.name if self.custom_button_trigger else 'not selected'}"
+ elif self.step in [
+ EscalationPolicy.STEP_NOTIFY_USERS_QUEUE,
+ EscalationPolicy.STEP_NOTIFY_MULTIPLE_USERS,
+ EscalationPolicy.STEP_NOTIFY_MULTIPLE_USERS_IMPORTANT,
+ ]:
+            if self.notify_to_users_queue.exists():
+ users_verbal = ", ".join([user.username for user in self.sorted_users_queue])
+ else:
+ users_verbal = "not selected"
+ result += f", users: {users_verbal}"
+ elif self.step == EscalationPolicy.STEP_NOTIFY_IF_TIME:
+ if self.from_time:
+ from_time_verbal = self.from_time.isoformat() + " (UTC)"
+ else:
+ from_time_verbal = "not selected"
+ if self.to_time:
+ to_time_verbal = self.to_time.isoformat() + " (UTC)"
+ else:
+ to_time_verbal = "not selected"
+ result += f", from time: {from_time_verbal}, to time: {to_time_verbal}"
+ return result
+
+ @property
+ def sorted_users_queue(self):
+ return sorted(self.notify_to_users_queue.all(), key=lambda user: (user.username or "", user.pk))
+
+ @property
+ def slack_integration_required(self):
+        return self.step in self.SLACK_INTEGRATION_REQUIRED_STEPS
+
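+    # Returns the human-readable name for a step (the second element of the matching
+    # STEP_CHOICES entry), or an empty string for an unknown step.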
+ @staticmethod
+ def get_step_display_name(step):
+ step_name = ""
+ for step_choice in EscalationPolicy.STEP_CHOICES:
+ if step_choice[0] == step:
+ step_name = step_choice[1]
+ break
+ return step_name
diff --git a/engine/apps/alerts/models/grafana_alerting_contact_point.py b/engine/apps/alerts/models/grafana_alerting_contact_point.py
new file mode 100644
index 0000000000..d4cee24c47
--- /dev/null
+++ b/engine/apps/alerts/models/grafana_alerting_contact_point.py
@@ -0,0 +1,22 @@
+import logging
+
+from django.db import models
+
+logger = logging.getLogger(__name__)
+
+
+class GrafanaAlertingContactPoint(models.Model):
+ GRAFANA_CONTACT_POINT = "grafana"
+ ALERTING_DATASOURCE = "alertmanager"
+
+ alert_receive_channel = models.ForeignKey(
+ "alerts.AlertReceiveChannel",
+ on_delete=models.CASCADE,
+ null=True,
+ default=None,
+ related_name="contact_points",
+ )
+ uid = models.CharField(max_length=100, null=True, default=None) # uid is None for non-Grafana datasource
+ name = models.CharField(max_length=100)
+ datasource_name = models.CharField(max_length=100, default="grafana")
+ datasource_id = models.IntegerField(null=True, default=None) # id is None for Grafana datasource
diff --git a/engine/apps/alerts/models/invitation.py b/engine/apps/alerts/models/invitation.py
new file mode 100644
index 0000000000..10668d94b7
--- /dev/null
+++ b/engine/apps/alerts/models/invitation.py
@@ -0,0 +1,123 @@
+import logging
+
+from django.apps import apps
+from django.db import models, transaction
+from django.utils import timezone
+
+from apps.alerts.tasks import invite_user_to_join_incident, send_alert_group_signal
+
+logger = logging.getLogger(__name__)
+logger.setLevel(logging.DEBUG)
+
+
+class Invitation(models.Model):
+ """
+    Represents an invitation for a user to join working on an alert group.
+ """
+
+ ATTEMPTS_LIMIT = 10
+
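+    # Delays between repeated invitation attempts; once the attempt number outgrows
+    # this list, the last delta is reused, e.g. get_delay_by_attempt(0) returns
+    # 6 minutes and get_delay_by_attempt(10) returns 3 hours 1 minute.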
+ time_deltas_by_attempts = [
+ timezone.timedelta(minutes=6),
+ timezone.timedelta(minutes=16),
+ timezone.timedelta(minutes=31),
+ timezone.timedelta(hours=1, minutes=1),
+ timezone.timedelta(hours=3, minutes=1),
+ ]
+
+ author = models.ForeignKey(
+ "user_management.User",
+ null=True,
+ on_delete=models.SET_NULL,
+ related_name="author_of_invitations",
+ )
+
+ invitee = models.ForeignKey(
+ "user_management.User",
+ null=True,
+ on_delete=models.SET_NULL,
+ related_name="invitee_in_invitations",
+ )
+
+ created_at = models.DateTimeField(auto_now_add=True)
+ is_active = models.BooleanField(default=True)
+ alert_group = models.ForeignKey("alerts.AlertGroup", on_delete=models.CASCADE, related_name="invitations")
+ attempt = models.IntegerField(default=0)
+
+ @property
+ def attempts_left(self):
+ return Invitation.ATTEMPTS_LIMIT - self.attempt
+
+ @staticmethod
+ def get_delay_by_attempt(attempt):
+ countdown = Invitation.time_deltas_by_attempts[-1]
+ if attempt < len(Invitation.time_deltas_by_attempts):
+ countdown = Invitation.time_deltas_by_attempts[attempt]
+ return countdown
+
+ @staticmethod
+ def invite_user(invitee_user, alert_group, user):
+ AlertGroupLogRecord = apps.get_model("alerts", "AlertGroupLogRecord")
+
+ # RFCT - why atomic? without select for update?
+ with transaction.atomic():
+ try:
+ invitation = Invitation.objects.get(
+ invitee=invitee_user,
+ alert_group=alert_group,
+ is_active=True,
+ )
+ invitation.is_active = False
+ invitation.save(update_fields=["is_active"])
+ log_record = AlertGroupLogRecord(
+ type=AlertGroupLogRecord.TYPE_RE_INVITE, author=user, alert_group=alert_group
+ )
+ except Invitation.DoesNotExist:
+ log_record = AlertGroupLogRecord(
+ type=AlertGroupLogRecord.TYPE_INVITE,
+ author=user,
+ alert_group=alert_group,
+ )
+ invitation = Invitation(
+ invitee=invitee_user,
+ alert_group=alert_group,
+ is_active=True,
+ author=user,
+ )
+ invitation.save()
+
+ log_record.invitation = invitation
+ log_record.save()
+ logger.debug(
+ f"call send_alert_group_signal for alert_group {alert_group.pk}, "
+ f"log record {log_record.pk} with type '{log_record.get_type_display()}'"
+ )
+ send_alert_group_signal.apply_async((log_record.pk,))
+
+ invite_user_to_join_incident.apply_async((invitation.pk,))
+
+ @staticmethod
+ def stop_invitation(invitation_pk, user):
+ AlertGroupLogRecord = apps.get_model("alerts", "AlertGroupLogRecord")
+
+ with transaction.atomic():
+ try:
+ invitation = Invitation.objects.filter(pk=invitation_pk).select_for_update()[0]
+ except IndexError:
+ return f"stop_invitation: Invitation with pk {invitation_pk} doesn't exist"
+ invitation.is_active = False
+ invitation.save(update_fields=["is_active"])
+
+ log_record = AlertGroupLogRecord(
+ type=AlertGroupLogRecord.TYPE_STOP_INVITATION,
+ author=user,
+ alert_group=invitation.alert_group,
+ invitation=invitation,
+ )
+
+ log_record.save()
+ logger.debug(
+ f"call send_alert_group_signal for alert_group {invitation.alert_group.pk}, "
+ f"log record {log_record.pk} with type '{log_record.get_type_display()}'"
+ )
+ send_alert_group_signal.apply_async((log_record.pk,))
diff --git a/engine/apps/alerts/models/maintainable_object.py b/engine/apps/alerts/models/maintainable_object.py
new file mode 100644
index 0000000000..9bf1d8cd1d
--- /dev/null
+++ b/engine/apps/alerts/models/maintainable_object.py
@@ -0,0 +1,188 @@
+from uuid import uuid4
+
+import humanize
+import pytz
+from django.apps import apps
+from django.db import models, transaction
+from django.utils import timezone
+
+from apps.slack.scenarios.scenario_step import ScenarioStep
+from apps.user_management.organization_log_creator import create_organization_log
+from common.exceptions import MaintenanceCouldNotBeStartedError
+
+
+class MaintainableObject(models.Model):
+ class Meta:
+ abstract = True
+
+ DURATION_ONE_HOUR = timezone.timedelta(hours=1)
+ DURATION_THREE_HOURS = timezone.timedelta(hours=3)
+ DURATION_SIX_HOURS = timezone.timedelta(hours=6)
+ DURATION_TWELVE_HOURS = timezone.timedelta(hours=12)
+ DURATION_TWENTY_FOUR_HOURS = timezone.timedelta(hours=24)
+
+ MAINTENANCE_DURATION_CHOICES = (
+ (DURATION_ONE_HOUR, "1 hour"),
+ (DURATION_THREE_HOURS, "3 hours"),
+ (DURATION_SIX_HOURS, "6 hours"),
+ (DURATION_TWELVE_HOURS, "12 hours"),
+ (DURATION_TWENTY_FOUR_HOURS, "24 hours"),
+ )
+
+ maintenance_duration = models.DurationField(default=None, null=True, choices=MAINTENANCE_DURATION_CHOICES)
+ (DEBUG_MAINTENANCE, MAINTENANCE) = range(2)
+
+ DEBUG_MAINTENANCE_KEY = "Debug"
+ MAINTENANCE_KEY = "Maintenance"
+
+ MAINTENANCE_MODE_CHOICES = ((DEBUG_MAINTENANCE, DEBUG_MAINTENANCE_KEY), (MAINTENANCE, MAINTENANCE_KEY))
+ MAINTENANCE_VERBAL = {
+ DEBUG_MAINTENANCE: "Debug (silence all escalations)",
+ MAINTENANCE: "Maintenance (collect everything in one incident)",
+ }
+
+ maintenance_mode = models.IntegerField(default=None, null=True, choices=MAINTENANCE_MODE_CHOICES)
+
+ maintenance_uuid = models.CharField(max_length=250, unique=True, null=True, default=None)
+ maintenance_started_at = models.DateTimeField(null=True, default=None)
+ maintenance_author = models.ForeignKey(
+ "user_management.user", on_delete=models.SET_NULL, null=True, related_name="%(class)s_maintenances_created"
+ )
+
+ def start_disable_maintenance_task(self, countdown):
+ raise NotImplementedError
+
+ def get_organization(self):
+ raise NotImplementedError
+
+ def get_team(self):
+ raise NotImplementedError
+
+ def get_verbal(self):
+ raise NotImplementedError
+
+ def force_disable_maintenance(self, user):
+ raise NotImplementedError
+
+ def notify_about_maintenance_action(self, text, send_to_general_log_channel=True):
+ raise NotImplementedError
+
+ def send_maintenance_incident(self, organization, group, alert):
+ slack_team_identity = organization.slack_team_identity
+ if slack_team_identity is not None:
+ channel_id = organization.general_log_channel_id
+ attachments = group.render_slack_attachments()
+ blocks = group.render_slack_blocks()
+ AlertShootingStep = ScenarioStep.get_step("distribute_alerts", "AlertShootingStep")
+ AlertShootingStep(slack_team_identity, organization).publish_slack_messages(
+ slack_team_identity, group, alert, attachments, channel_id, blocks
+ )
+
+ def start_maintenance(self, mode, maintenance_duration, user):
+ AlertGroup = apps.get_model("alerts", "AlertGroup")
+ AlertReceiveChannel = apps.get_model("alerts", "AlertReceiveChannel")
+ Alert = apps.get_model("alerts", "Alert")
+ OrganizationLogRecord = apps.get_model("base", "OrganizationLogRecord")
+
+ with transaction.atomic():
+ _self = self.__class__.objects.select_for_update().get(pk=self.pk)
+ if _self.maintenance_mode is not None:
+ raise MaintenanceCouldNotBeStartedError("Already on maintenance")
+ organization = _self.get_organization()
+ team = _self.get_team()
+ verbal = _self.get_verbal()
+ user_verbal = user.get_user_verbal_for_team_for_slack()
+ duration_verbal = humanize.naturaldelta(maintenance_duration)
+ # NOTE: there could be multiple maintenance integrations in case of a race condition
+ # (no constraints at the db level, it shouldn't be an issue functionality-wise)
+ maintenance_integration = AlertReceiveChannel.objects_with_maintenance.filter(
+ organization=organization,
+ team=team,
+ integration=AlertReceiveChannel.INTEGRATION_MAINTENANCE,
+ ).last()
+ if maintenance_integration is None:
+ maintenance_integration = AlertReceiveChannel.create(
+ organization=organization,
+ team=team,
+ integration=AlertReceiveChannel.INTEGRATION_MAINTENANCE,
+ )
+
+ maintenance_uuid = _self.start_disable_maintenance_task(maintenance_duration)
+
+ _self.maintenance_duration = timezone.timedelta(seconds=maintenance_duration)
+ _self.maintenance_uuid = maintenance_uuid
+ _self.maintenance_mode = mode
+ _self.maintenance_started_at = timezone.now()
+ _self.maintenance_author = user
+ _self.save(
+ update_fields=[
+ "maintenance_duration",
+ "maintenance_uuid",
+ "maintenance_mode",
+ "maintenance_started_at",
+ "maintenance_author",
+ ]
+ )
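+            # mirror the refreshed values back onto self, since _self is the locked row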
+ self.maintenance_duration = _self.maintenance_duration
+ self.maintenance_uuid = _self.maintenance_uuid
+ self.maintenance_mode = _self.maintenance_mode
+ self.maintenance_started_at = _self.maintenance_started_at
+ self.maintenance_author = _self.maintenance_author
+ if mode == AlertReceiveChannel.MAINTENANCE:
+ group = AlertGroup.all_objects.create(
+ distinction=uuid4(),
+ verbose_name=f"Maintenance of {verbal} for {maintenance_duration}",
+ maintenance_uuid=maintenance_uuid,
+ channel_filter_id=maintenance_integration.default_channel_filter.pk,
+ channel=maintenance_integration,
+ )
+ title = f"Maintenance of {verbal} for {duration_verbal}"
+ message = (
+ f"Initiated by {user_verbal}."
+ f" During this time all alerts from integration will be collected here without escalations"
+ )
+ alert = Alert(
+ is_resolve_signal=False,
+ title=title,
+ message=message,
+ group=group,
+ raw_request_data={
+ "title": title,
+ "message": message,
+ },
+ )
+ alert.save()
+ # create team log
+ log_type, object_verbal = OrganizationLogRecord.get_log_type_and_maintainable_object_verbal(self, mode, verbal)
+ description = f"{self.get_maintenance_mode_display()} of {object_verbal} started for {duration_verbal}"
+ create_organization_log(organization, user, log_type, description)
+
+ if mode == AlertReceiveChannel.MAINTENANCE:
+ self.send_maintenance_incident(organization, group, alert)
+ self.notify_about_maintenance_action(
+ f"Maintenance of {verbal}. Initiated by {user_verbal} for {duration_verbal}.",
+ send_to_general_log_channel=False,
+ )
+ else:
+ self.notify_about_maintenance_action(
+ f"Debug of {verbal}. Initiated by {user_verbal} for {duration_verbal}."
+ )
+
+ @property
+ def till_maintenance_timestamp(self):
+ if self.maintenance_started_at is not None and self.maintenance_duration is not None:
+ return int((self.maintenance_started_at + self.maintenance_duration).astimezone(pytz.UTC).timestamp())
+ return None
+
+ @property
+ def started_at_timestamp(self):
+ if self.maintenance_started_at is not None and self.maintenance_duration is not None:
+ return int(self.maintenance_started_at.astimezone(pytz.UTC).timestamp())
+ return None
+
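+    # Returns the maintenance duration choices as integer seconds,
+    # e.g. [3600, 10800, 21600, 43200, 86400].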
+ @classmethod
+ def maintenance_duration_options_in_seconds(cls):
+ options_in_seconds = []
+ for ch in cls.MAINTENANCE_DURATION_CHOICES:
+ options_in_seconds.append(int(ch[0].total_seconds()))
+ return options_in_seconds
diff --git a/engine/apps/alerts/models/resolution_note.py b/engine/apps/alerts/models/resolution_note.py
new file mode 100644
index 0000000000..a22b74a783
--- /dev/null
+++ b/engine/apps/alerts/models/resolution_note.py
@@ -0,0 +1,208 @@
+import humanize
+from django.conf import settings
+from django.core.validators import MinLengthValidator
+from django.db import models
+from django.utils import timezone
+from rest_framework.fields import DateTimeField
+
+from apps.slack.slack_formatter import SlackFormatter
+from common.public_primary_keys import generate_public_primary_key, increase_public_primary_key_length
+from common.utils import clean_markup
+
+
+def generate_public_primary_key_for_alert_group_postmortem():
+ prefix = "P"
+ new_public_primary_key = generate_public_primary_key(prefix)
+
+ failure_counter = 0
+ while AlertGroupPostmortem.objects.filter(public_primary_key=new_public_primary_key).exists():
+ new_public_primary_key = increase_public_primary_key_length(
+ failure_counter=failure_counter, prefix=prefix, model_name="AlertGroupPostmortem"
+ )
+ failure_counter += 1
+
+ return new_public_primary_key
+
+
+def generate_public_primary_key_for_resolution_note():
+ prefix = "M"
+ new_public_primary_key = generate_public_primary_key(prefix)
+
+ failure_counter = 0
+ while ResolutionNote.objects.filter(public_primary_key=new_public_primary_key).exists():
+ new_public_primary_key = increase_public_primary_key_length(
+ failure_counter=failure_counter, prefix=prefix, model_name="ResolutionNote"
+ )
+ failure_counter += 1
+
+ return new_public_primary_key
+
+
+class ResolutionNoteSlackMessageQueryset(models.QuerySet):
+ def delete(self):
+        # soft-delete resolution notes tied to these Slack messages before deleting the messages themselves
+        for message in self:
+            resolution_note = message.get_resolution_note()
+            if resolution_note:
+                resolution_note.delete()
+        super().delete()
+
+
+class ResolutionNoteSlackMessage(models.Model):
+ alert_group = models.ForeignKey(
+ "alerts.AlertGroup",
+ on_delete=models.CASCADE,
+ related_name="resolution_note_slack_messages",
+ )
+ user = models.ForeignKey(
+ "user_management.User",
+ null=True,
+ on_delete=models.SET_NULL,
+ related_name="authored_resolution_note_slack_messages",
+ )
+ added_by_user = models.ForeignKey(
+ "user_management.User",
+ null=True,
+ on_delete=models.SET_NULL,
+ related_name="added_resolution_note_slack_messages",
+ )
+ text = models.TextField(max_length=3000, default=None, null=True)
+ slack_channel_id = models.CharField(max_length=100, null=True, default=None)
+ ts = models.CharField(max_length=100, null=True, default=None)
+ thread_ts = models.CharField(max_length=100, null=True, default=None)
+ permalink = models.CharField(max_length=250, null=True, default=None)
+ added_to_resolution_note = models.BooleanField(default=False)
+ posted_by_bot = models.BooleanField(default=False)
+
+ class Meta:
+ unique_together = ("thread_ts", "ts")
+
+ def get_resolution_note(self):
+ try:
+ return self.resolution_note
+ except ResolutionNoteSlackMessage.resolution_note.RelatedObjectDoesNotExist:
+ return None
+
+ def delete(self):
+ resolution_note = self.get_resolution_note()
+ if resolution_note:
+ resolution_note.delete()
+ super().delete()
+
+
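+# Soft-delete support: delete() only stamps deleted_at, filter() hides soft-deleted
+# rows, and hard_delete() removes them for real.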
+class ResolutionNoteQueryset(models.QuerySet):
+ def delete(self):
+ self.update(deleted_at=timezone.now())
+
+ def hard_delete(self):
+ super().delete()
+
+ def filter(self, *args, **kwargs):
+ return super().filter(*args, **kwargs, deleted_at__isnull=True)
+
+
+class ResolutionNote(models.Model):
+
+ objects = ResolutionNoteQueryset.as_manager()
+ objects_with_deleted = models.Manager()
+
+ class Source(models.IntegerChoices):
+ SLACK = 0, "slack"
+ WEB = 1, "web"
+
+ public_primary_key = models.CharField(
+ max_length=20,
+ validators=[MinLengthValidator(settings.PUBLIC_PRIMARY_KEY_MIN_LENGTH + 1)],
+ unique=True,
+ default=generate_public_primary_key_for_resolution_note,
+ )
+
+ alert_group = models.ForeignKey(
+ "alerts.AlertGroup",
+ on_delete=models.CASCADE,
+ related_name="resolution_notes",
+ )
+ source = models.IntegerField(choices=Source.choices, default=None, null=True)
+ author = models.ForeignKey(
+ "user_management.User",
+ on_delete=models.SET_NULL,
+ null=True,
+ default=None,
+ related_name="authored_resolution_notes",
+ )
+ message_text = models.TextField(max_length=3000, default=None, null=True)
+ created_at = models.DateTimeField(auto_now_add=True)
+
+ resolution_note_slack_message = models.OneToOneField(
+ "alerts.ResolutionNoteSlackMessage",
+ on_delete=models.SET_NULL,
+ null=True,
+ default=None,
+ related_name="resolution_note",
+ )
+ deleted_at = models.DateTimeField(default=None, null=True)
+
+ def delete(self):
+ ResolutionNote.objects.filter(pk=self.pk).delete()
+
+ def hard_delete(self):
+ super().delete()
+
+ @property
+ def text(self):
+ if self.source == ResolutionNote.Source.SLACK:
+ return self.resolution_note_slack_message.text
+ return self.message_text
+
+ def recreate(self):
+ """
+        Recreates a soft-deleted resolution note,
+        e.g. when a resolution note is removed and then added again in Slack.
+ """
+ self.deleted_at = None
+ self.save(update_fields=["deleted_at"])
+
+ def render_log_line_json(self):
+ time = humanize.naturaldelta(self.alert_group.started_at - self.created_at)
+ created_at = DateTimeField().to_representation(self.created_at)
+ author = self.author.short() if self.author is not None else None
+
+ sf = SlackFormatter(self.alert_group.channel.organization)
+ action = sf.format(self.text)
+ action = clean_markup(action)
+
+ result = {
+ "time": time,
+ "action": action,
+ "realm": "resolution_note",
+ "type": self.source,
+ "created_at": created_at,
+ "author": author,
+ }
+
+ return result
+
+ def author_verbal(self, mention):
+ """
+        The postmortems-to-resolution-notes migration moved AlertGroupPostmortem records into ResolutionNote,
+        but AlertGroupPostmortem has no author field, so this method was introduced as a workaround.
+ """
+ if self.author is not None:
+ return self.author.get_user_verbal_for_team_for_slack(mention)
+ else:
+ return ""
+
+
+class AlertGroupPostmortem(models.Model):
+ public_primary_key = models.CharField(
+ max_length=20,
+ validators=[MinLengthValidator(settings.PUBLIC_PRIMARY_KEY_MIN_LENGTH + 1)],
+ unique=True,
+ default=generate_public_primary_key_for_alert_group_postmortem,
+ )
+ alert_group = models.ForeignKey(
+ "alerts.AlertGroup",
+ on_delete=models.CASCADE,
+ related_name="postmortem_text",
+ )
+ created_at = models.DateTimeField(auto_now_add=True)
+ last_modified = models.DateTimeField(auto_now=True)
+ text = models.TextField(max_length=3000, default=None, null=True)
diff --git a/engine/apps/alerts/models/user_has_notification.py b/engine/apps/alerts/models/user_has_notification.py
new file mode 100644
index 0000000000..967a93cfe2
--- /dev/null
+++ b/engine/apps/alerts/models/user_has_notification.py
@@ -0,0 +1,18 @@
+from django.db import models
+
+
+class UserHasNotification(models.Model):
+ user = models.ForeignKey(
+ "user_management.User",
+ on_delete=models.CASCADE,
+ )
+
+ alert_group = models.ForeignKey(
+ "alerts.AlertGroup",
+ on_delete=models.CASCADE,
+ )
+
+ active_notification_policy_id = models.CharField(max_length=100, null=True, default=None) # ID generated by celery
+
+ class Meta:
+ unique_together = ("user", "alert_group")
diff --git a/engine/apps/alerts/representative.py b/engine/apps/alerts/representative.py
new file mode 100644
index 0000000000..3b266d1ee2
--- /dev/null
+++ b/engine/apps/alerts/representative.py
@@ -0,0 +1,23 @@
+import logging
+from abc import ABC, abstractmethod
+
+from django.apps import apps
+
+logger = logging.getLogger(__name__)
+
+
+class AlertGroupAbstractRepresentative(ABC):
+ HANDLER_PREFIX = "on_"
+
+ @abstractmethod
+ def is_applicable(self):
+ return None
+
+ @staticmethod
+ def get_handlers_map():
+ AlertGroupLogRecord = apps.get_model("alerts", "AlertGroupLogRecord")
+ return AlertGroupLogRecord.ACTIONS_TO_HANDLERS_MAP
+
+ @classmethod
+ def on_create_alert(cls, **kwargs):
+ raise NotImplementedError
diff --git a/engine/apps/alerts/signals.py b/engine/apps/alerts/signals.py
new file mode 100644
index 0000000000..c3934d05f6
--- /dev/null
+++ b/engine/apps/alerts/signals.py
@@ -0,0 +1,59 @@
+import django.dispatch
+
+from apps.slack.representatives.alert_group_representative import AlertGroupSlackRepresentative
+from apps.slack.representatives.user_representative import UserSlackRepresentative
+
+"""
+There are three entities that require synchronization between web, Slack and Telegram:
+AlertGroup, its log records, and its resolution notes.
+"""
+# Signal to create alert group message in all connected integrations (Slack, Telegram)
+alert_create_signal = django.dispatch.Signal(
+ providing_args=[
+ "alert",
+ ]
+)
+
+# Signal to rerender alert group in all connected integrations (Slack, Telegram) when its state is changed
+alert_group_action_triggered_signal = django.dispatch.Signal(
+ providing_args=[
+ "log_record",
+ "action_source",
+ ]
+)
+
+# Signal to rerender alert group's log message in all connected integrations (Slack, Telegram)
+# when alert group state is changed
+alert_group_update_log_report_signal = django.dispatch.Signal(providing_args=["alert_group"])
+
+# Signal to rerender alert group's resolution note in all connected integrations (Slack)
+alert_group_update_resolution_note_signal = django.dispatch.Signal(
+ providing_args=[
+ "alert_group",
+ "resolution_note",
+ ]
+)
+
+# Currently only writes an error to the Slack thread while notifying a user. Maybe it is worth deleting?
+user_notification_action_triggered_signal = django.dispatch.Signal(providing_args=["log_record"])
+
+alert_create_signal.connect(
+ AlertGroupSlackRepresentative.on_create_alert,
+)
+
+
+alert_group_action_triggered_signal.connect(
+ AlertGroupSlackRepresentative.on_alert_group_action_triggered,
+)
+
+alert_group_update_log_report_signal.connect(
+ AlertGroupSlackRepresentative.on_alert_group_update_log_report,
+)
+
+alert_group_update_resolution_note_signal.connect(
+ AlertGroupSlackRepresentative.on_alert_group_update_resolution_note,
+)
+
+user_notification_action_triggered_signal.connect(
+ UserSlackRepresentative.on_user_action_triggered,
+)
diff --git a/engine/apps/alerts/tasks/__init__.py b/engine/apps/alerts/tasks/__init__.py
new file mode 100644
index 0000000000..8e0e994f9f
--- /dev/null
+++ b/engine/apps/alerts/tasks/__init__.py
@@ -0,0 +1,27 @@
+from .acknowledge_reminder import acknowledge_reminder_task # noqa: F401
+from .cache_alert_group_for_web import cache_alert_group_for_web, schedule_cache_for_alert_group # noqa: F401
+from .calculcate_escalation_finish_time import calculate_escalation_finish_time  # noqa: F401
+from .call_ack_url import call_ack_url # noqa: F401
+from .check_escalation_finished import check_escalation_finished_task # noqa: F401
+from .create_contact_points_for_datasource import create_contact_points_for_datasource # noqa: F401
+from .custom_button_result import custom_button_result # noqa: F401
+from .delete_alert_group import delete_alert_group # noqa: F401
+from .distribute_alert import distribute_alert # noqa: F401
+from .escalate_alert_group import escalate_alert_group # noqa: F401
+from .invalidate_web_cache_for_alert_group import invalidate_web_cache_for_alert_group # noqa: F401
+from .invite_user_to_join_incident import invite_user_to_join_incident # noqa: F401
+from .maintenance import disable_maintenance # noqa: F401
+from .notify_all import notify_all_task # noqa: F401
+from .notify_group import notify_group_task # noqa: F401
+from .notify_ical_schedule_shift import notify_ical_schedule_shift # noqa: F401
+from .notify_user import notify_user_task # noqa: F401
+from .resolve_alert_group_by_source_if_needed import resolve_alert_group_by_source_if_needed # noqa: F401
+from .resolve_alert_group_if_needed import resolve_alert_group_if_needed # noqa: F401
+from .resolve_by_last_step import resolve_by_last_step_task # noqa: F401
+from .send_alert_group_signal import send_alert_group_signal # noqa: F401
+from .send_update_log_report_signal import send_update_log_report_signal # noqa: F401
+from .send_update_postmortem_signal import send_update_postmortem_signal # noqa: F401
+from .send_update_resolution_note_signal import send_update_resolution_note_signal # noqa: F401
+from .sync_grafana_alerting_contact_points import sync_grafana_alerting_contact_points # noqa: F401
+from .unsilence import unsilence_task # noqa: F401
+from .wipe import wipe # noqa: F401
diff --git a/engine/apps/alerts/tasks/acknowledge_reminder.py b/engine/apps/alerts/tasks/acknowledge_reminder.py
new file mode 100644
index 0000000000..6cbf86a6c3
--- /dev/null
+++ b/engine/apps/alerts/tasks/acknowledge_reminder.py
@@ -0,0 +1,137 @@
+from django.apps import apps
+from django.conf import settings
+from django.db import transaction
+
+from common.custom_celery_tasks import shared_dedicated_queue_retry_task
+
+from .send_alert_group_signal import send_alert_group_signal
+from .task_logger import task_logger
+
+
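+# acknowledge_reminder_task and unacknowledge_timeout_task schedule each other:
+# the reminder either re-arms itself or, when an unacknowledge timeout is configured,
+# arms unacknowledge_timeout_task, which un-acks the alert group and restarts
+# escalation if the acknowledgement was never confirmed.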
+@shared_dedicated_queue_retry_task(
+ autoretry_for=(Exception,), retry_backoff=True, max_retries=1 if settings.DEBUG else None
+)
+def acknowledge_reminder_task(alert_group_pk, unacknowledge_process_id):
+ Organization = apps.get_model("user_management", "Organization")
+ AlertGroup = apps.get_model("alerts", "AlertGroup")
+ AlertGroupLogRecord = apps.get_model("alerts", "AlertGroupLogRecord")
+
+ log_record = None
+
+ task_logger.info(f"Starting a reminder task for acknowledgement timeout with process id {unacknowledge_process_id}")
+ with transaction.atomic():
+ try:
+ alert_group = AlertGroup.unarchived_objects.filter(pk=alert_group_pk).select_for_update()[
+ 0
+ ] # Lock alert_group:
+ except IndexError:
+ return f"acknowledge_reminder_task: Alert group with pk {alert_group_pk} doesn't exist"
+
+ if alert_group.last_unique_unacknowledge_process_id == unacknowledge_process_id:
+ alert_group.acknowledged_by_confirmed = None
+ alert_group.save(update_fields=["acknowledged_by_confirmed"])
+ if alert_group.status == AlertGroup.ACKNOWLEDGED and alert_group.is_root_alert_group:
+ if alert_group.acknowledged and alert_group.acknowledged_by == AlertGroup.USER:
+ log_record = AlertGroupLogRecord(
+ type=AlertGroupLogRecord.TYPE_ACK_REMINDER_TRIGGERED,
+ author=alert_group.acknowledged_by_user,
+ alert_group=alert_group,
+ )
+ seconds_unack = Organization.UNACKNOWLEDGE_TIMEOUT_DELAY[
+ alert_group.channel.organization.unacknowledge_timeout
+ ]
+ if (
+ alert_group.channel.organization.unacknowledge_timeout
+ != Organization.UNACKNOWLEDGE_TIMEOUT_NEVER
+ ):
+ unacknowledge_timeout_task.apply_async(
+ (alert_group.pk, unacknowledge_process_id),
+ countdown=seconds_unack,
+ )
+ else:
+ if (
+ alert_group.channel.organization.acknowledge_remind_timeout
+ != Organization.ACKNOWLEDGE_REMIND_NEVER
+ ):
+ seconds_remind = Organization.ACKNOWLEDGE_REMIND_DELAY[
+ alert_group.channel.organization.acknowledge_remind_timeout
+ ]
+ acknowledge_reminder_task.apply_async(
+ (
+ alert_group.pk,
+ unacknowledge_process_id,
+ ),
+ countdown=seconds_remind,
+ )
+ if log_record is not None:
+ log_record.save()
+ task_logger.debug(
+ f"call send_alert_group_signal for alert_group {alert_group_pk}, "
+ f"log record {log_record.pk} with type '{log_record.get_type_display()}'"
+ )
+ transaction.on_commit(lambda: send_alert_group_signal.apply_async((log_record.pk,)))
+
+ task_logger.info(f"Finished a reminder task for acknowledgement timeout with process id {unacknowledge_process_id}")
+
+
+@shared_dedicated_queue_retry_task(
+ autoretry_for=(Exception,), retry_backoff=True, max_retries=1 if settings.DEBUG else None
+)
+def unacknowledge_timeout_task(alert_group_pk, unacknowledge_process_id):
+ Organization = apps.get_model("user_management", "Organization")
+ AlertGroup = apps.get_model("alerts", "AlertGroup")
+ AlertGroupLogRecord = apps.get_model("alerts", "AlertGroupLogRecord")
+
+ log_record = None
+
+    task_logger.info(
+        f"Starting an unacknowledge task for acknowledgement timeout with process id {unacknowledge_process_id}"
+    )
+ with transaction.atomic():
+ try:
+ alert_group = AlertGroup.all_objects.filter(pk=alert_group_pk).select_for_update()[0] # Lock alert_group:
+ except IndexError:
+ return f"unacknowledge_timeout_task: Alert group with pk {alert_group_pk} doesn't exist"
+
+ if unacknowledge_process_id == alert_group.last_unique_unacknowledge_process_id:
+ if (
+ not alert_group.resolved
+ and not alert_group.is_archived
+ and alert_group.acknowledged
+ and alert_group.is_root_alert_group
+ ):
+ if not alert_group.acknowledged_by_confirmed:
+ log_record = AlertGroupLogRecord(
+ type=AlertGroupLogRecord.TYPE_AUTO_UN_ACK,
+ author=alert_group.acknowledged_by_user,
+ alert_group=alert_group,
+ )
+ alert_group.unacknowledge()
+ alert_group.start_escalation_if_needed()
+ else:
+ seconds_remind = Organization.ACKNOWLEDGE_REMIND_DELAY[
+ alert_group.channel.organization.acknowledge_remind_timeout
+ ]
+ seconds_unack = Organization.UNACKNOWLEDGE_TIMEOUT_DELAY[
+ alert_group.channel.organization.unacknowledge_timeout
+ ]
+ seconds = seconds_remind - seconds_unack
+ acknowledge_reminder_task.apply_async(
+ (
+ alert_group_pk,
+ unacknowledge_process_id,
+ ),
+ countdown=seconds,
+ )
+
+ if log_record is not None:
+ log_record.save()
+ task_logger.debug(
+ f"call send_alert_group_signal for alert_group {alert_group_pk}, "
+ f"log record {log_record.pk} with type '{log_record.get_type_display()}'"
+ )
+ transaction.on_commit(lambda: send_alert_group_signal.apply_async((log_record.pk,)))
+
+    task_logger.info(
+        f"Finished an unacknowledge task for acknowledgement timeout with process id {unacknowledge_process_id}"
+    )
diff --git a/engine/apps/alerts/tasks/cache_alert_group_for_web.py b/engine/apps/alerts/tasks/cache_alert_group_for_web.py
new file mode 100644
index 0000000000..677e0a1973
--- /dev/null
+++ b/engine/apps/alerts/tasks/cache_alert_group_for_web.py
@@ -0,0 +1,54 @@
+from celery.utils.log import get_task_logger
+from django.apps import apps
+from django.conf import settings
+from django.core.cache import cache
+
+from common.custom_celery_tasks import shared_dedicated_queue_retry_task
+
+logger = get_task_logger(__name__)
+
+
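+# Builds the per-alert-group cache key, e.g. "cache_alert_group_for_web_123" for pk=123.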
+def get_cache_key_caching_alert_group_for_web(alert_group_pk):
+ CACHE_KEY_PREFIX = "cache_alert_group_for_web"
+ return f"{CACHE_KEY_PREFIX}_{alert_group_pk}"
+
+
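+# Debounces re-caching: the id of the most recently scheduled task is kept in cache,
+# so cache_alert_group_for_web only does the work for the latest scheduled request.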
+@shared_dedicated_queue_retry_task(
+ autoretry_for=(Exception,), retry_backoff=True, max_retries=0 if settings.DEBUG else None
+)
+def schedule_cache_for_alert_group(alert_group_pk):
+ CACHE_FOR_ALERT_GROUP_LIFETIME = 60
+ START_CACHE_DELAY = 5 # we introduce delay to avoid recaching after each alert.
+
+ task = cache_alert_group_for_web.apply_async(args=[alert_group_pk], countdown=START_CACHE_DELAY)
+ cache_key = get_cache_key_caching_alert_group_for_web(alert_group_pk)
+ cache.set(cache_key, task.id, timeout=CACHE_FOR_ALERT_GROUP_LIFETIME)
+
+
+@shared_dedicated_queue_retry_task(
+ autoretry_for=(Exception,), retry_backoff=True, max_retries=0 if settings.DEBUG else None
+)
+def cache_alert_group_for_web(alert_group_pk):
+ """
+ Async task to re-cache alert_group for web.
+ """
+ cache_key = get_cache_key_caching_alert_group_for_web(alert_group_pk)
+ cached_task_id = cache.get(cache_key)
+ current_task_id = cache_alert_group_for_web.request.id
+
+    if cached_task_id is None:
+        return (
+            f"cache_alert_group_for_web skipped, because the task_id for alert_group {alert_group_pk}"
+            f" doesn't exist in cache, which means this task is not relevant:"
+            f" the cache was dropped by an engine restart or by CACHE_FOR_ALERT_GROUP_LIFETIME expiration"
+        )
+    if current_task_id != cached_task_id:
+        return (
+            f"cache_alert_group_for_web skipped, because the current task_id ({current_task_id})"
+            f" doesn't equal the cached task_id ({cached_task_id}) for alert_group {alert_group_pk}"
+        )
+    else:
+ AlertGroup = apps.get_model("alerts", "AlertGroup")
+ alert_group = AlertGroup.all_objects.using_readonly_db.get(pk=alert_group_pk)
+ alert_group.cache_for_web(alert_group.channel.organization)
+ logger.info(f"cache_alert_group_for_web: cache refreshed for alert_group {alert_group_pk}")
diff --git a/engine/apps/alerts/tasks/calculcate_escalation_finish_time.py b/engine/apps/alerts/tasks/calculcate_escalation_finish_time.py
new file mode 100644
index 0000000000..ef1483c107
--- /dev/null
+++ b/engine/apps/alerts/tasks/calculcate_escalation_finish_time.py
@@ -0,0 +1,15 @@
+from django.apps import apps
+from django.conf import settings
+
+from common.custom_celery_tasks import shared_dedicated_queue_retry_task
+
+
+@shared_dedicated_queue_retry_task(
+ autoretry_for=(Exception,), retry_backoff=True, max_retries=1 if settings.DEBUG else None
+)
+def calculate_escalation_finish_time(alert_group_pk):
+ AlertGroup = apps.get_model("alerts", "AlertGroup")
+ alert_group = AlertGroup.all_objects.filter(pk=alert_group_pk)[0]
+ if alert_group.escalation_snapshot:
+ alert_group.estimate_escalation_finish_time = alert_group.calculate_eta_for_finish_escalation()
+ alert_group.save(update_fields=["estimate_escalation_finish_time"])
diff --git a/engine/apps/alerts/tasks/call_ack_url.py b/engine/apps/alerts/tasks/call_ack_url.py
new file mode 100644
index 0000000000..a9feeed1c1
--- /dev/null
+++ b/engine/apps/alerts/tasks/call_ack_url.py
@@ -0,0 +1,49 @@
+from django.apps import apps
+
+from apps.alerts.utils import render_curl_command, request_outgoing_webhook
+from apps.slack.slack_client import SlackClientWithErrorHandling
+from common.custom_celery_tasks import shared_dedicated_queue_retry_task
+
+
+@shared_dedicated_queue_retry_task(autoretry_for=(Exception,), retry_backoff=True, max_retries=2)
+def call_ack_url(ack_url, alert_group_pk, channel, http_method="GET"):
+ AlertGroup = apps.get_model("alerts", "AlertGroup")
+ SlackMessage = apps.get_model("slack", "SlackMessage")
+ alert_group = AlertGroup.all_objects.filter(pk=alert_group_pk)[0]
+ is_successful, result_message = request_outgoing_webhook(ack_url, http_method)
+
+ if is_successful:
+ alert_group.acknowledged_on_source = True
+ alert_group.save()
+ debug_message = ""
+ info_message = "OnCall successfully sent {} request to acknowledge alert on the source".format(http_method)
+ else:
+ curl_request = render_curl_command(ack_url, http_method)
+ debug_message = "```{}```".format(curl_request)
+ info_message = "OnCall attempted to acknowledge alert on the source with the result: `{}`".format(
+ result_message
+ )
+
+    if channel is not None:
+        sc = SlackClientWithErrorHandling(alert_group.channel.organization.slack_team_identity.bot_access_token)
+ result = sc.api_call(
+ "chat.postMessage",
+ channel=channel,
+ attachments=[
+ {"callback_id": "alert", "text": "{}".format(debug_message), "footer": "{}".format(info_message)},
+ ],
+ thread_ts=alert_group.slack_message.slack_id,
+ mrkdwn=True,
+ )
+ SlackMessage(
+ slack_id=result["ts"],
+ organization=alert_group.channel.organization,
+ _slack_team_identity=alert_group.channel.organization.slack_team_identity,
+ channel_id=channel,
+ alert_group=alert_group,
+ ).save()
diff --git a/engine/apps/alerts/tasks/check_escalation_finished.py b/engine/apps/alerts/tasks/check_escalation_finished.py
new file mode 100644
index 0000000000..64f44d3d79
--- /dev/null
+++ b/engine/apps/alerts/tasks/check_escalation_finished.py
@@ -0,0 +1,48 @@
+from django.apps import apps
+from django.conf import settings
+from django.db.models import Q
+from django.utils import timezone
+
+from apps.alerts.tasks.task_logger import task_logger
+from common.custom_celery_tasks import shared_dedicated_queue_retry_task
+
+
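+# Raising an exception here surfaces overdue escalations through the task's failure
+# monitoring; maintenance integrations and forever-silenced alert groups are excluded.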
+@shared_dedicated_queue_retry_task(
+ autoretry_for=(Exception,), retry_backoff=True, max_retries=1 if settings.DEBUG else None, default_retry_delay=60
+)
+def check_escalation_finished_task():
+ """
+    This task periodically checks for alert groups whose escalations have not finished on time.
+    TODO: QA this properly; check whether new types of escalation steps have been added
+ """
+ AlertGroup = apps.get_model("alerts", "AlertGroup")
+ AlertReceiveChannel = apps.get_model("alerts", "AlertReceiveChannel")
+
+ CHECKING_TOLERANCE = timezone.timedelta(minutes=5)
+ CHECKING_TIME = timezone.now() - CHECKING_TOLERANCE
+
+ alert_groups = AlertGroup.all_objects.filter(
+ ~Q(channel__integration=AlertReceiveChannel.INTEGRATION_MAINTENANCE),
+ ~Q(silenced=True, silenced_until__isnull=True), # filter silenced forever alert_groups
+ is_escalation_finished=False,
+ resolved=False,
+ acknowledged=False,
+ root_alert_group=None,
+ estimate_escalation_finish_time__lte=CHECKING_TIME,
+ )
+
+ if not alert_groups.exists():
+ return
+
+ exception_template = "Escalation for alert_group {} is not finished at expected time {}, now {}"
+
+ now = timezone.now()
+ exception_text = "\n".join(
+ exception_template.format(alert_group.pk, alert_group.estimate_escalation_finish_time, now)
+ for alert_group in alert_groups
+ )
+
+ ids = alert_groups.values_list("pk", flat=True)
+ task_logger.debug(ids)
+
+ raise Exception(exception_text)
diff --git a/engine/apps/alerts/tasks/compare_escalations.py b/engine/apps/alerts/tasks/compare_escalations.py
new file mode 100644
index 0000000000..6187fa6565
--- /dev/null
+++ b/engine/apps/alerts/tasks/compare_escalations.py
@@ -0,0 +1,4 @@
+def compare_escalations(request_id, active_escalation_id):
+    # True when this task is still the active escalation task for the alert group
+    return request_id == active_escalation_id
diff --git a/engine/apps/alerts/tasks/create_contact_points_for_datasource.py b/engine/apps/alerts/tasks/create_contact_points_for_datasource.py
new file mode 100644
index 0000000000..f3dc3f4bd8
--- /dev/null
+++ b/engine/apps/alerts/tasks/create_contact_points_for_datasource.py
@@ -0,0 +1,44 @@
+from django.apps import apps
+from rest_framework import status
+
+from apps.grafana_plugin.helpers import GrafanaAPIClient
+from common.custom_celery_tasks import shared_dedicated_queue_retry_task
+
+
+@shared_dedicated_queue_retry_task(autoretry_for=(Exception,), retry_backoff=True, max_retries=10)
+def create_contact_points_for_datasource(alert_receive_channel_id, datasource_list):
+ """
+    Try to create contact points for each datasource.
+    Restart the task for datasources for which a contact point was not created.
+ """
+
+ AlertReceiveChannel = apps.get_model("alerts", "AlertReceiveChannel")
+
+ alert_receive_channel = AlertReceiveChannel.objects.get(pk=alert_receive_channel_id)
+
+ client = GrafanaAPIClient(
+ api_url=alert_receive_channel.organization.grafana_url,
+ api_token=alert_receive_channel.organization.api_token,
+ )
+    # list of datasources for which contact point creation failed
+ datasource_to_create = []
+ for datasource in datasource_list:
+ contact_point = None
+ config, response_info = client.get_alerting_config(datasource["id"])
+ if config is None:
+ if response_info.get("status_code") == status.HTTP_404_NOT_FOUND:
+ client.get_alertmanager_status_with_config(datasource["id"])
+ contact_point = alert_receive_channel.grafana_alerting_sync_manager.create_contact_point(datasource)
+ else:
+ contact_point = alert_receive_channel.grafana_alerting_sync_manager.create_contact_point(datasource)
+ if contact_point is None:
+            # Failed to create the contact point due to getting a wrong alerting config.
+            # Add the datasource to the list and retry creating a contact point for it
+ datasource_to_create.append(datasource)
+
+ # if some contact points were not created, restart task for them
+ if datasource_to_create:
+ create_contact_points_for_datasource.apply_async((alert_receive_channel_id, datasource_to_create), countdown=5)
+ else:
+ alert_receive_channel.is_finished_alerting_setup = True
+ alert_receive_channel.save(update_fields=["is_finished_alerting_setup"])
diff --git a/engine/apps/alerts/tasks/custom_button_result.py b/engine/apps/alerts/tasks/custom_button_result.py
new file mode 100644
index 0000000000..e7e826a55f
--- /dev/null
+++ b/engine/apps/alerts/tasks/custom_button_result.py
@@ -0,0 +1,89 @@
+import json
+import logging
+
+from django.apps import apps
+from django.conf import settings
+from django.db import transaction
+from jinja2 import TemplateError
+
+from apps.alerts.utils import request_outgoing_webhook
+from common.custom_celery_tasks import shared_dedicated_queue_retry_task
+
+from .send_alert_group_signal import send_alert_group_signal
+from .task_logger import task_logger
+
+logger = logging.getLogger(__name__)
+
+
+@shared_dedicated_queue_retry_task(
+ autoretry_for=(Exception,), retry_backoff=True, max_retries=1 if settings.DEBUG else None
+)
+def custom_button_result(custom_button_pk, alert_group_pk, user_pk=None, escalation_policy_pk=None):
+ AlertGroup = apps.get_model("alerts", "AlertGroup")
+ EscalationPolicy = apps.get_model("alerts", "EscalationPolicy")
+ AlertGroupLogRecord = apps.get_model("alerts", "AlertGroupLogRecord")
+ CustomButton = apps.get_model("alerts", "CustomButton")
+ User = apps.get_model("user_management", "User")
+
+ task_logger.debug(
+ f"Start custom_button_result for alert_group {alert_group_pk}, " f"custom_button {custom_button_pk}"
+ )
+ try:
+ custom_button = CustomButton.objects.get(pk=custom_button_pk)
+ except CustomButton.DoesNotExist:
+ task_logger.info(f"Custom_button {custom_button_pk} for alert_group {alert_group_pk} does not exist")
+ return
+
+ alert_group = AlertGroup.all_objects.filter(pk=alert_group_pk)[0]
+ escalation_policy = EscalationPolicy.objects.filter(pk=escalation_policy_pk).first()
+ task_logger.debug(
+ f"Start getting data for request in custom_button_result task for alert_group {alert_group_pk}, "
+ f"custom_button {custom_button_pk}"
+ )
+
+ first_alert = alert_group.alerts.first()
+
+ try:
+ post_kwargs = custom_button.build_post_kwargs(first_alert)
+ except TemplateError:
+ is_request_successful = False
+ result_message = "Template error"
+ except json.JSONDecodeError as e:
+ task_logger.error(
+ f"Failed to send build_post_kwargs for alert_group {alert_group_pk}, " f"custom_button {custom_button_pk}"
+ )
+ raise e
+ else:
+ is_request_successful, result_message = request_outgoing_webhook(
+ custom_button.webhook, "POST", post_kwargs=post_kwargs
+ )
+
+ task_logger.debug(
+ f"Send post request in custom_button_result task for alert_group {alert_group_pk}, "
+ f"custom_button {custom_button_pk}"
+ )
+ with transaction.atomic():
+ user = None
+ if user_pk:
+ user = User.objects.get(pk=user_pk)
+
+ log_record = AlertGroupLogRecord(
+ type=AlertGroupLogRecord.TYPE_CUSTOM_BUTTON_TRIGGERED,
+ alert_group=alert_group,
+ custom_button=custom_button,
+ author=user,
+ reason=result_message,
+ step_specific_info={
+ "custom_button_name": custom_button.name,
+ "is_request_successful": is_request_successful,
+ },
+ escalation_policy=escalation_policy,
+ escalation_policy_step=EscalationPolicy.STEP_TRIGGER_CUSTOM_BUTTON,
+ )
+ log_record.save()
+ task_logger.debug(
+ f"call send_alert_group_signal for alert_group {alert_group_pk}, "
+ f"log record {log_record.pk} with type '{log_record.get_type_display()}'"
+ )
+ transaction.on_commit(lambda: send_alert_group_signal.apply_async((log_record.pk,)))
+ task_logger.debug(f"Finish custom_button_result for alert_group {alert_group_pk}, custom_button {custom_button_pk}")
diff --git a/engine/apps/alerts/tasks/delete_alert_group.py b/engine/apps/alerts/tasks/delete_alert_group.py
new file mode 100644
index 0000000000..06e4d5b2ae
--- /dev/null
+++ b/engine/apps/alerts/tasks/delete_alert_group.py
@@ -0,0 +1,26 @@
+from celery.utils.log import get_task_logger
+from django.apps import apps
+from django.conf import settings
+
+from common.custom_celery_tasks import shared_dedicated_queue_retry_task
+
+logger = get_task_logger(__name__)
+
+
+@shared_dedicated_queue_retry_task(
+ autoretry_for=(Exception,), retry_backoff=True, max_retries=1 if settings.DEBUG else None
+)
+def delete_alert_group(alert_group_pk, user_pk):
+ AlertGroup = apps.get_model("alerts", "AlertGroup")
+ User = apps.get_model("user_management", "User")
+ alert_group = AlertGroup.all_objects.filter(pk=alert_group_pk).first()
+ if not alert_group:
+ logger.debug("Alert group not found, skipping delete_alert_group")
+ return
+
+ user = User.objects.filter(pk=user_pk).first()
+ if not user:
+ logger.debug("User not found, skipping delete_alert_group")
+ return
+
+ alert_group.delete_by_user(user)
diff --git a/engine/apps/alerts/tasks/distribute_alert.py b/engine/apps/alerts/tasks/distribute_alert.py
new file mode 100644
index 0000000000..01e12f3cdd
--- /dev/null
+++ b/engine/apps/alerts/tasks/distribute_alert.py
@@ -0,0 +1,50 @@
+from django.apps import apps
+from django.conf import settings
+
+from apps.alerts.constants import TASK_DELAY_SECONDS
+from apps.alerts.signals import alert_create_signal
+from common.custom_celery_tasks import shared_dedicated_queue_retry_task
+
+from .task_logger import task_logger
+
+
+@shared_dedicated_queue_retry_task(
+ autoretry_for=(Exception,), retry_backoff=True, max_retries=1 if settings.DEBUG else None, default_retry_delay=60
+)
+def distribute_alert(alert_id):
+ """
+    We need this task to make alert processing async and to make sure the alert is delivered.
+ """
+ Alert = apps.get_model("alerts", "Alert")
+ AlertGroup = apps.get_model("alerts", "AlertGroup")
+
+ alert = Alert.objects.get(pk=alert_id)
+
+ task_logger.debug(f"Start distribute_alert for alert {alert_id} from alert_group {alert.group_id}")
+ send_alert_create_signal.apply_async((alert_id,))
+
+ alert_group = AlertGroup.all_objects.filter(pk=alert.group_id).get()
+
+ # If it's the first alert, let's launch the escalation!
+ if alert.is_the_first_alert_in_group:
+ alert_group.start_escalation_if_needed(countdown=TASK_DELAY_SECONDS)
+
+    updated_rows = Alert.objects.filter(pk=alert_id, delivered=False).update(delivered=True)
+ if updated_rows != 1:
+ task_logger.critical(
+ f"Tried to mark alert {alert_id} as delivered but it's already marked as delivered. Possible concurrency issue."
+ )
+
+ task_logger.debug(f"Finish distribute_alert for alert {alert_id} from alert_group {alert.group_id}")
+
+
+@shared_dedicated_queue_retry_task(
+ autoretry_for=(Exception,), retry_backoff=True, max_retries=0 if settings.DEBUG else None
+)
+def send_alert_create_signal(alert_id):
+ task_logger.debug(f"Started send_alert_create_signal task for alert {alert_id}")
+ alert_create_signal.send(
+ sender=send_alert_create_signal,
+ alert=alert_id,
+ )
+ task_logger.debug(f"Finished send_alert_create_signal task for alert {alert_id} ")
diff --git a/engine/apps/alerts/tasks/escalate_alert_group.py b/engine/apps/alerts/tasks/escalate_alert_group.py
new file mode 100644
index 0000000000..0d9d454852
--- /dev/null
+++ b/engine/apps/alerts/tasks/escalate_alert_group.py
@@ -0,0 +1,102 @@
+from django.apps import apps
+from django.conf import settings
+from django.db import transaction
+from kombu import uuid as celery_uuid
+
+from common.custom_celery_tasks import shared_dedicated_queue_retry_task
+
+from .compare_escalations import compare_escalations
+from .task_logger import task_logger
+
+
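+# Each run executes one escalation step and, unless escalation is finished or paused,
+# re-schedules itself under a fresh task id stored in active_escalation_id;
+# compare_escalations then lets stale duplicates exit early.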
+@shared_dedicated_queue_retry_task(
+ autoretry_for=(Exception,), retry_backoff=True, max_retries=0 if settings.DEBUG else None
+)
+def escalate_alert_group(alert_group_pk):
+ """
+ This task is on duty to send escalated alerts and schedule further escalation.
+ """
+ AlertGroup = apps.get_model("alerts", "AlertGroup")
+
+ task_logger.debug(f"Start escalate_alert_group for alert_group {alert_group_pk}")
+
+ log_message = ""
+
+ with transaction.atomic():
+ try:
+ alert_group = AlertGroup.all_objects.filter(pk=alert_group_pk).select_for_update()[0] # Lock alert_group:
+ except IndexError:
+ return f"Alert group with pk {alert_group_pk} doesn't exist"
+
+ if (
+ alert_group.channel.maintenance_mode is not None
+ or alert_group.channel.organization.maintenance_mode is not None
+ ):
+ task_logger.info(f"alert_group {alert_group.pk} organization or alert_receive_channel on maintenance.")
+ alert_group.stop_escalation()
+ return
+
+ if not compare_escalations(escalate_alert_group.request.id, alert_group.active_escalation_id):
+ return "Active escalation ID mismatch. Duplication or non-active escalation triggered. Active: {}".format(
+ alert_group.active_escalation_id
+ )
+
+ if alert_group.resolved or alert_group.acknowledged or alert_group.is_silenced_forever:
+ task_logger.info(f"alert_group {alert_group.pk} resolved, acked or silenced forever. No need to escalate.")
+ alert_group.stop_escalation()
+ return
+
+ if alert_group.is_silenced_for_period:
+ # escalation will be restarted by unsilence_task
+ task_logger.info(
+ f"alert_group {alert_group.pk} silenced for period. Escalation will be restarted by unsilence_task"
+ )
+ return
+
+ if alert_group.root_alert_group is not None:
+ # TODO: consistent_is_escalation_finished remove this check for is_escalation_finished
+ return "Alert is dependent on another. No need to activate escalation."
+
+ if alert_group.is_archived:
+ # TODO: consistent_is_escalation_finished remove this check for is_escalation_finished
+ return "Escalation stopped. Reason: incident is archived. Escalation id: {}".format(
+ alert_group.active_escalation_id
+ )
+
+ if alert_group.wiped_at is not None:
+ # TODO: consistent_is_escalation_finished remove this check for is_escalation_finished
+ return "Alert is wiped. No need to activate escalation."
+
+ escalation_snapshot = alert_group.escalation_snapshot
+
+ if escalation_snapshot is None:
+ return (
+ f"alert_group {alert_group_pk} has no saved escalation snapshot. "
+ f"Probably its channel filter was deleted or has no attached escalation chain."
+ )
+
+ escalation_snapshot.execute_actual_escalation_step()
+
+ alert_group.raw_escalation_snapshot = escalation_snapshot.convert_to_dict()
+
+ if escalation_snapshot.stop_escalation:
+ alert_group.is_escalation_finished = True
+ alert_group.save(update_fields=["is_escalation_finished", "raw_escalation_snapshot"])
+ log_message += "Alert lifecycle finished. OnCall will be silent about this incident from now. "
+ elif escalation_snapshot.pause_escalation:
+ alert_group.save(update_fields=["raw_escalation_snapshot"])
+ log_message += "Escalation is paused. "
+ else:
+ eta = escalation_snapshot.next_step_eta
+
+ task_id = celery_uuid()
+ alert_group.active_escalation_id = task_id
+ transaction.on_commit(
+ lambda: escalate_alert_group.apply_async((alert_group.pk,), immutable=True, eta=eta, task_id=task_id)
+ )
+ alert_group.save(update_fields=["active_escalation_id", "raw_escalation_snapshot"])
+ log_message += "Next escalation poked, id: {} ".format(task_id)
+
+ task_logger.debug(f"end of transaction in escalate_alert_group for alert_group {alert_group_pk}")
+ task_logger.debug(f"Finish escalate_alert_group for alert_group {alert_group_pk}")
+ return log_message + "Escalation executed."
diff --git a/engine/apps/alerts/tasks/invalidate_web_cache_for_alert_group.py b/engine/apps/alerts/tasks/invalidate_web_cache_for_alert_group.py
new file mode 100644
index 0000000000..d9c7c4f988
--- /dev/null
+++ b/engine/apps/alerts/tasks/invalidate_web_cache_for_alert_group.py
@@ -0,0 +1,32 @@
+from django.apps import apps
+from django.conf import settings
+
+from common.custom_celery_tasks import shared_dedicated_queue_retry_task
+
+from .task_logger import task_logger
+
+
+@shared_dedicated_queue_retry_task(
+ autoretry_for=(Exception,), retry_backoff=True, max_retries=1 if settings.DEBUG else None
+)
+def invalidate_web_cache_for_alert_group(org_pk=None, channel_pk=None, alert_group_pk=None, alert_group_pks=None):
+ AlertGroup = apps.get_model("alerts", "AlertGroup")
+ DynamicSetting = apps.get_model("base", "DynamicSetting")
+
+ if channel_pk:
+ task_logger.debug(f"invalidate_web_cache_for_alert_group: Reason - alert_receive_channel {channel_pk}")
+ q = AlertGroup.all_objects.filter(channel__pk=channel_pk)
+ elif org_pk:
+ task_logger.debug(f"invalidate_web_cache_for_alert_group: Reason - organization {org_pk}")
+ q = AlertGroup.all_objects.filter(channel__organization__pk=org_pk)
+ elif alert_group_pk:
+ task_logger.debug(f"invalidate_web_cache_for_alert_group: Reason - alert_group {alert_group_pk}")
+ q = AlertGroup.all_objects.filter(pk=alert_group_pk)
+    elif alert_group_pks:
+        task_logger.debug(f"invalidate_web_cache_for_alert_group: Reason - alert_groups {alert_group_pks}")
+        q = AlertGroup.all_objects.filter(pk__in=alert_group_pks)
+    else:
+        # without any filter argument there is nothing to invalidate
+        return "invalidate_web_cache_for_alert_group skipped: no filter arguments provided"
+
+ skip_task = DynamicSetting.objects.get_or_create(name="skip_invalidate_web_cache_for_alert_group")[0]
+ if skip_task.boolean_value:
+ return "Task has been skipped because of skip_invalidate_web_cache_for_alert_group DynamicSetting"
+ q.update(cached_render_for_web={})
diff --git a/engine/apps/alerts/tasks/invite_user_to_join_incident.py b/engine/apps/alerts/tasks/invite_user_to_join_incident.py
new file mode 100644
index 0000000000..708b96ee86
--- /dev/null
+++ b/engine/apps/alerts/tasks/invite_user_to_join_incident.py
@@ -0,0 +1,71 @@
+import humanize
+from django.apps import apps
+from django.conf import settings
+from django.db import transaction
+from django.db.models import F
+
+from common.custom_celery_tasks import shared_dedicated_queue_retry_task
+
+from .notify_user import notify_user_task
+
+
+@shared_dedicated_queue_retry_task(
+ autoretry_for=(Exception,), retry_backoff=True, max_retries=1 if settings.DEBUG else None
+)
+def invite_user_to_join_incident(invitation_pk):
+ Invitation = apps.get_model("alerts", "Invitation")
+ AlertGroupLogRecord = apps.get_model("alerts", "AlertGroupLogRecord")
+
+ with transaction.atomic():
+ try:
+ invitation = Invitation.objects.filter(pk=invitation_pk).select_for_update()[0]
+ except IndexError:
+ return f"invite_user_to_join_incident: Invitation with pk {invitation_pk} doesn't exist"
+
+ if not invitation.is_active or invitation.alert_group.is_archived:
+ return None
+ if invitation.attempts_left <= 0 or invitation.alert_group.resolved:
+ invitation.is_active = False
+ invitation.save(update_fields=["is_active"])
+ return None
+
+ delay = Invitation.get_delay_by_attempt(invitation.attempt)
+
+ user_verbal = invitation.author.get_user_verbal_for_team_for_slack(mention=True)
+ reason = "Invitation activated by {}. Will try again in {} (attempt {}/{})".format(
+ user_verbal,
+ humanize.naturaldelta(delay),
+ invitation.attempt + 1,
+ Invitation.ATTEMPTS_LIMIT,
+ )
+
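+    # Queue the user notification together with the next invitation attempt;
+    # immutable signatures keep parent task results out of the argument lists.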
+ notify_task = notify_user_task.signature(
+ (
+ invitation.invitee.pk,
+ invitation.alert_group.pk,
+ ),
+ {
+ "reason": reason,
+ "notify_even_acknowledged": True,
+ "notify_anyway": True,
+ "important": True,
+ },
+ immutable=True,
+ )
+
+ log_record = AlertGroupLogRecord(
+ type=AlertGroupLogRecord.TYPE_INVITATION_TRIGGERED,
+ author=None,
+ alert_group=invitation.alert_group,
+ invitation=invitation,
+ )
+ log_record.save()
+
+ invitation_task = invite_user_to_join_incident.signature(
+ (invitation.pk,), countdown=delay.total_seconds(), immutable=True
+ )
+ notify_task.apply_async()
+ invitation_task.apply_async()
+
+ invitation.attempt = F("attempt") + 1
+ invitation.save()
diff --git a/engine/apps/alerts/tasks/maintenance.py b/engine/apps/alerts/tasks/maintenance.py
new file mode 100644
index 0000000000..bdfca60a8b
--- /dev/null
+++ b/engine/apps/alerts/tasks/maintenance.py
@@ -0,0 +1,138 @@
+from django.apps import apps
+from django.conf import settings
+from django.db import transaction
+from django.db.models import ExpressionWrapper, F, fields
+from django.utils import timezone
+
+from apps.user_management.organization_log_creator import create_organization_log
+from common.custom_celery_tasks import shared_dedicated_queue_retry_task
+
+from .task_logger import task_logger
+
+
+@shared_dedicated_queue_retry_task(
+ autoretry_for=(Exception,), retry_backoff=True, max_retries=1 if settings.DEBUG else None
+)
+def disable_maintenance(*args, **kwargs):
+ AlertGroup = apps.get_model("alerts", "AlertGroup")
+ OrganizationLogRecord = apps.get_model("base", "OrganizationLogRecord")
+ User = apps.get_model("user_management", "User")
+ Organization = apps.get_model("user_management", "Organization")
+ user = None
+ object_under_maintenance = None
+ user_id = kwargs.get("user_id")
+ if user_id:
+ user = User.objects.get(pk=user_id)
+
+ force = kwargs.get("force", False)
+
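+    # Lock the object under maintenance so that concurrent disable_maintenance
+    # runs (e.g. the periodic check racing a manual stop) don't process it twice.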
+ with transaction.atomic():
+ if "alert_receive_channel_id" in kwargs:
+ AlertReceiveChannel = apps.get_model("alerts", "AlertReceiveChannel")
+
+ alert_receive_channel_id = kwargs["alert_receive_channel_id"]
+ try:
+ object_under_maintenance = AlertReceiveChannel.objects.select_for_update().get(
+ pk=alert_receive_channel_id,
+ )
+ except AlertReceiveChannel.DoesNotExist:
+ task_logger.info(
+ f"AlertReceiveChannel for disable_maintenance does not exists. Id: {alert_receive_channel_id}"
+ )
+ elif "organization_id" in kwargs:
+ organization_id = kwargs["organization_id"]
+ try:
+ object_under_maintenance = Organization.objects.select_for_update().get(pk=organization_id)
+ except Organization.DoesNotExist:
+ task_logger.info(f"Organization for disable_maintenance does not exists. Id: {organization_id}")
+
+ else:
+ task_logger.info(f"Invalid instance id passed in disable_maintenance. Got: {kwargs}")
+
+ if object_under_maintenance is not None and (
+ disable_maintenance.request.id == object_under_maintenance.maintenance_uuid or force
+ ):
+ verbal = object_under_maintenance.get_verbal()
+ log_type, object_verbal = OrganizationLogRecord.get_log_type_and_maintainable_object_verbal(
+ object_under_maintenance,
+ object_under_maintenance.maintenance_mode,
+ verbal,
+ stopped=True,
+ )
+ description = (
+ f"{object_under_maintenance.get_maintenance_mode_display()} of {object_verbal} "
+ f"stopped{' by user' if user else ''}"
+ )
+ organization = (
+ object_under_maintenance
+ if isinstance(object_under_maintenance, Organization)
+ else object_under_maintenance.organization
+ )
+ create_organization_log(organization, user, log_type, description)
+ if object_under_maintenance.maintenance_mode == object_under_maintenance.MAINTENANCE:
+ mode_verbal = "Maintenance"
+ maintenance_incident = AlertGroup.all_objects.get(
+ maintenance_uuid=object_under_maintenance.maintenance_uuid
+ )
+ transaction.on_commit(maintenance_incident.resolve_by_disable_maintenance)
+ if object_under_maintenance.maintenance_mode == object_under_maintenance.DEBUG_MAINTENANCE:
+ mode_verbal = "Debug"
+            # Use the mode_verbal variable instead of object_under_maintenance.get_maintenance_mode_display():
+            # by the time the on_commit callback runs, maintenance_mode has already been reset to None.
+ if organization.slack_team_identity:
+ transaction.on_commit(
+ lambda: object_under_maintenance.notify_about_maintenance_action(
+ f"{mode_verbal} of {verbal} finished."
+ )
+ )
+
+ object_under_maintenance.maintenance_uuid = None
+ object_under_maintenance.maintenance_duration = None
+ object_under_maintenance.maintenance_mode = None
+ object_under_maintenance.maintenance_started_at = None
+ object_under_maintenance.maintenance_author = None
+ object_under_maintenance.save(
+ update_fields=[
+ "maintenance_uuid",
+ "maintenance_duration",
+ "maintenance_mode",
+ "maintenance_started_at",
+ "maintenance_author",
+ ]
+ )
+
+
+@shared_dedicated_queue_retry_task(
+ autoretry_for=(Exception,), retry_backoff=True, max_retries=1 if settings.DEBUG else None
+)
+def check_maintenance_finished(*args, **kwargs):
+ AlertReceiveChannel = apps.get_model("alerts", "AlertReceiveChannel")
+ Organization = apps.get_model("user_management", "Organization")
+ now = timezone.now()
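+    # Compute the maintenance end time on the database side: started_at + duration.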
+ maintenance_finish_at = ExpressionWrapper(
+ (F("maintenance_started_at") + F("maintenance_duration")), output_field=fields.DateTimeField()
+ )
+ alert_receive_channel_with_expired_maintenance_ids = (
+ AlertReceiveChannel.objects.filter(maintenance_started_at__isnull=False)
+ .annotate(maintenance_finish_at=maintenance_finish_at)
+ .filter(maintenance_finish_at__lt=now)
+ .values_list("pk", flat=True)
+ )
+
+    for channel_id in alert_receive_channel_with_expired_maintenance_ids:
+        disable_maintenance.apply_async(
+            args=(),
+            kwargs={"alert_receive_channel_id": channel_id, "force": True},
+        )
+
+ organization_with_expired_maintenance_ids = (
+ Organization.objects.filter(maintenance_started_at__isnull=False)
+ .annotate(maintenance_finish_at=maintenance_finish_at)
+ .filter(maintenance_finish_at__lt=now)
+ .values_list("pk", flat=True)
+ )
+    for org_id in organization_with_expired_maintenance_ids:
+        disable_maintenance.apply_async(
+            args=(),
+            kwargs={"organization_id": org_id, "force": True},
+        )
diff --git a/engine/apps/alerts/tasks/notify_all.py b/engine/apps/alerts/tasks/notify_all.py
new file mode 100644
index 0000000000..e466b1ceab
--- /dev/null
+++ b/engine/apps/alerts/tasks/notify_all.py
@@ -0,0 +1,96 @@
+from django.apps import apps
+from django.conf import settings
+
+from apps.slack.tasks import check_slack_message_exists_before_post_message_to_thread
+from common.custom_celery_tasks import shared_dedicated_queue_retry_task
+
+from .notify_user import notify_user_task
+from .task_logger import task_logger
+
+
+@shared_dedicated_queue_retry_task(
+ autoretry_for=(Exception,), retry_backoff=True, max_retries=1 if settings.DEBUG else None
+)
+def notify_all_task(alert_group_pk, escalation_policy_snapshot_order=None):
+ AlertGroupLogRecord = apps.get_model("alerts", "AlertGroupLogRecord")
+ EscalationPolicy = apps.get_model("alerts", "EscalationPolicy")
+ AlertGroup = apps.get_model("alerts", "AlertGroup")
+
+ alert_group = AlertGroup.all_objects.get(pk=alert_group_pk)
+
+ escalation_snapshot = alert_group.escalation_snapshot
+ escalation_policy_snapshot = escalation_snapshot.escalation_policies_snapshots[escalation_policy_snapshot_order]
+ escalation_policy_pk = escalation_policy_snapshot.id
+ escalation_policy = EscalationPolicy.objects.filter(pk=escalation_policy_pk).first()
+ escalation_policy_step = escalation_policy_snapshot.step
+ slack_channel_id = escalation_snapshot.slack_channel_id
+
+ countdown = 0
+ slack_team_identity = alert_group.channel.organization.slack_team_identity
+
+ AlertGroupLogRecord(
+ type=AlertGroupLogRecord.TYPE_ESCALATION_TRIGGERED,
+ author=None,
+ alert_group=alert_group,
+ escalation_policy=escalation_policy,
+ escalation_policy_step=escalation_policy_step,
+ ).save()
+
+ # we cannot notify a slack channel if team does not have slack team identity,
+ # because we make a request to slack to get channel members
+ if slack_team_identity is None or slack_channel_id is None:
+ AlertGroupLogRecord(
+ type=AlertGroupLogRecord.TYPE_ESCALATION_FAILED,
+ alert_group=alert_group,
+ escalation_policy=escalation_policy,
+ escalation_error_code=AlertGroupLogRecord.ERROR_ESCALATION_NOTIFY_IN_SLACK,
+ escalation_policy_step=escalation_policy_step,
+ ).save()
+ task_logger.debug(
+ f"Failed to notify slack channel for alert_group {alert_group_pk} because slack team identity doesn't exist"
+ )
+ return
+
+ # get users to notify
+ users = slack_team_identity.get_users_from_slack_conversation_for_organization(
+ channel_id=slack_channel_id,
+ organization=alert_group.channel.organization,
+ )
+
+ if escalation_snapshot is not None:
+ escalation_policy_snapshot.notify_to_users_queue = users
+ escalation_snapshot.save_to_alert_group()
+
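+    # Notify each channel member, staggering the tasks by one second each,
+    # presumably to avoid a burst of simultaneous notifications.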
+ for user in users:
+ reason = "notifying everyone in the channel"
+
+ notify_user_task.apply_async(
+ args=(
+ user.pk,
+ alert_group.pk,
+ ),
+ kwargs={"reason": reason, "prevent_posting_to_thread": True},
+ countdown=countdown,
+ )
+ countdown += 1
+ AlertGroupLogRecord(
+ type=AlertGroupLogRecord.TYPE_ESCALATION_TRIGGERED,
+ author=user,
+ alert_group=alert_group,
+ reason=reason.title(),
+ escalation_policy=escalation_policy,
+ escalation_policy_step=escalation_policy_step,
+ ).save()
+
+ if not alert_group.skip_escalation_in_slack and alert_group.notify_in_slack_enabled:
+ text = "Inviting . Reason: *Notify All* Step"
+ # Start task that checks if slack message exists every 10 seconds for 24 hours and publish message
+ # to thread if it does.
+ check_slack_message_exists_before_post_message_to_thread.apply_async(
+ args=(alert_group_pk, text),
+ kwargs={
+ "escalation_policy_pk": escalation_policy_pk,
+ "escalation_policy_step": escalation_policy_step,
+ },
+ countdown=5,
+ )
diff --git a/engine/apps/alerts/tasks/notify_group.py b/engine/apps/alerts/tasks/notify_group.py
new file mode 100644
index 0000000000..d18c31b170
--- /dev/null
+++ b/engine/apps/alerts/tasks/notify_group.py
@@ -0,0 +1,122 @@
+from django.apps import apps
+from django.conf import settings
+
+from apps.slack.scenarios import scenario_step
+from apps.slack.tasks import check_slack_message_exists_before_post_message_to_thread
+from common.custom_celery_tasks import shared_dedicated_queue_retry_task
+
+from .notify_user import notify_user_task
+from .task_logger import task_logger
+
+
+@shared_dedicated_queue_retry_task(
+ autoretry_for=(Exception,), retry_backoff=True, max_retries=1 if settings.DEBUG else None
+)
+def notify_group_task(alert_group_pk, escalation_policy_snapshot_order=None):
+ AlertGroupLogRecord = apps.get_model("alerts", "AlertGroupLogRecord")
+ UserNotificationPolicy = apps.get_model("base", "UserNotificationPolicy")
+ EscalationPolicy = apps.get_model("alerts", "EscalationPolicy")
+ AlertGroup = apps.get_model("alerts", "AlertGroup")
+ EscalationDeliveryStep = scenario_step.ScenarioStep.get_step("escalation_delivery", "EscalationDeliveryStep")
+
+ alert_group = AlertGroup.all_objects.get(pk=alert_group_pk)
+
+ organization = alert_group.channel.organization
+ slack_team_identity = organization.slack_team_identity
+ if not slack_team_identity:
+ task_logger.info(
+ f"Failed to notify user group for alert_group {alert_group_pk} because slack team identity doesn't exist"
+ )
+ return
+ step = EscalationDeliveryStep(slack_team_identity, organization)
+
+ escalation_snapshot = alert_group.escalation_snapshot
+ escalation_policy_snapshot = escalation_snapshot.escalation_policies_snapshots[escalation_policy_snapshot_order]
+ escalation_policy_pk = escalation_policy_snapshot.id
+ escalation_policy = EscalationPolicy.objects.filter(pk=escalation_policy_pk).first()
+ escalation_policy_step = escalation_policy_snapshot.step
+ usergroup = escalation_policy_snapshot.notify_to_group
+
+ usergroup_users = usergroup.get_users_from_members_for_organization(organization)
+
+ if len(usergroup_users) == 0:
+ log_record = AlertGroupLogRecord(
+ type=AlertGroupLogRecord.TYPE_ESCALATION_FAILED,
+ alert_group=alert_group,
+ escalation_policy=escalation_policy,
+ escalation_error_code=AlertGroupLogRecord.ERROR_ESCALATION_USER_GROUP_IS_EMPTY,
+ escalation_policy_step=escalation_policy_step,
+ )
+ log_record.save()
+ else:
+ if escalation_snapshot is not None:
+ escalation_policy_snapshot.notify_to_users_queue = usergroup_users
+ escalation_snapshot.save_to_alert_group()
+
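+        # Build a human-readable summary of every member's notification chain;
+        # it is later posted to the Slack thread along with the invitation text.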
+ usergroup_notification_plan = ""
+ for user in usergroup_users:
+ if not user.is_notification_allowed:
+ continue
+
+ notification_policies = UserNotificationPolicy.objects.get_or_create_for_user(
+ user=user,
+ important=escalation_policy_step == EscalationPolicy.STEP_NOTIFY_GROUP_IMPORTANT,
+ )
+ usergroup_notification_plan += "\n_{} (".format(
+ step.get_user_notification_message_for_thread_for_usergroup(user, notification_policies.first())
+ )
+ notification_channels = []
+ if notification_policies.filter(step=UserNotificationPolicy.Step.NOTIFY).count() == 0:
+ usergroup_notification_plan += "Empty notifications"
+ for notification_policy in notification_policies:
+ if notification_policy.step == UserNotificationPolicy.Step.NOTIFY:
+ notification_channels.append(
+ UserNotificationPolicy.NotificationChannel(notification_policy.notify_by).label
+ )
+ usergroup_notification_plan += "→".join(notification_channels) + ")_"
+ reason = f"Membership in User Group"
+
+ notify_user_task.apply_async(
+ args=(
+ user.pk,
+ alert_group.pk,
+ ),
+ kwargs={
+ "reason": reason,
+ "prevent_posting_to_thread": True,
+ "important": escalation_policy_step == EscalationPolicy.STEP_NOTIFY_GROUP_IMPORTANT,
+ },
+ )
+ AlertGroupLogRecord(
+ type=AlertGroupLogRecord.TYPE_ESCALATION_TRIGGERED,
+ author=user,
+ alert_group=alert_group,
+ reason=reason,
+ escalation_policy=escalation_policy,
+ escalation_policy_step=escalation_policy_step,
+ ).save()
+ log_record = AlertGroupLogRecord(
+ type=AlertGroupLogRecord.TYPE_ESCALATION_TRIGGERED,
+ alert_group=alert_group,
+ escalation_policy=escalation_policy,
+ escalation_policy_step=escalation_policy_step,
+ step_specific_info={"usergroup_handle": usergroup.handle},
+ )
+ log_record.save()
+ if not alert_group.skip_escalation_in_slack and alert_group.notify_in_slack_enabled:
+ text = f"Inviting @{usergroup.handle} User Group: {usergroup_notification_plan}"
+ step_specific_info = {"usergroup_handle": usergroup.handle}
+ # Start task that checks if slack message exists every 10 seconds for 24 hours and publish message
+ # to thread if it does.
+ check_slack_message_exists_before_post_message_to_thread.apply_async(
+ args=(alert_group_pk, text),
+ kwargs={
+ "escalation_policy_pk": escalation_policy_pk,
+ "escalation_policy_step": escalation_policy_step,
+ "step_specific_info": step_specific_info,
+ },
+ countdown=5,
+ )
+ task_logger.debug(
+ f"Finish notify_group_task for alert_group {alert_group_pk}, log record {log_record.pk}",
+ )
diff --git a/engine/apps/alerts/tasks/notify_ical_schedule_shift.py b/engine/apps/alerts/tasks/notify_ical_schedule_shift.py
new file mode 100644
index 0000000000..ce7049b061
--- /dev/null
+++ b/engine/apps/alerts/tasks/notify_ical_schedule_shift.py
@@ -0,0 +1,403 @@
+import datetime
+import json
+from copy import copy
+
+import icalendar
+from django.apps import apps
+from django.utils import timezone
+
+from apps.schedules.ical_events import ical_events
+from apps.schedules.ical_utils import (
+ calculate_shift_diff,
+ get_icalendar_tz_or_utc,
+ get_usernames_from_ical_event,
+ ical_date_to_datetime,
+ is_icals_equal,
+ memoized_users_in_ical,
+)
+from apps.slack.scenarios import scenario_step
+from apps.slack.slack_client import SlackClientWithErrorHandling
+from apps.slack.slack_client.exceptions import SlackAPIException, SlackAPITokenException
+from common.custom_celery_tasks import shared_dedicated_queue_retry_task
+
+from .task_logger import task_logger
+
+
+def get_current_shifts_from_ical(calendar, schedule, min_priority=0):
+ calendar_tz = get_icalendar_tz_or_utc(calendar)
+ now = timezone.datetime.now(timezone.utc)
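+    # Look one day to either side of "now" so events overlapping the current
+    # moment are captured regardless of timezone or all-day handling.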
+ events_from_ical_for_three_days = ical_events.get_events_from_ical_between(
+ calendar, now - timezone.timedelta(days=1), now + timezone.timedelta(days=1)
+ )
+ shifts = {}
+ current_users = {}
+ for event in events_from_ical_for_three_days:
+ usernames, priority = get_usernames_from_ical_event(event)
+ users = memoized_users_in_ical(tuple(usernames), schedule.organization)
+ if len(users) > 0:
+ event_start, start_all_day = ical_date_to_datetime(
+ event["DTSTART"].dt,
+ calendar_tz,
+ start=True,
+ )
+ event_end, end_all_day = ical_date_to_datetime(event["DTEND"].dt, calendar_tz, start=False)
+
+ if event["UID"] in shifts:
+ existing_event = shifts[event["UID"]]
+ if existing_event["start"] < now < existing_event["end"]:
+ continue
+ shifts[event["UID"]] = {
+ "users": [u.pk for u in users],
+ "start": event_start,
+ "end": event_end,
+ "all_day": start_all_day,
+ "priority": priority + min_priority, # increase priority for overrides
+ "priority_increased_by": min_priority,
+ }
+ current_users[event["UID"]] = users
+
+ return shifts, current_users
+
+
+def get_next_shifts_from_ical(calendar, schedule, min_priority=0, days_to_lookup=3):
+ calendar_tz = get_icalendar_tz_or_utc(calendar)
+ now = timezone.datetime.now(timezone.utc)
+ next_events_from_ical = ical_events.get_events_from_ical_between(
+ calendar, now - timezone.timedelta(days=1), now + timezone.timedelta(days=days_to_lookup)
+ )
+ shifts = {}
+ for event in next_events_from_ical:
+ usernames, priority = get_usernames_from_ical_event(event)
+ users = memoized_users_in_ical(tuple(usernames), schedule.organization)
+ if len(users) > 0:
+ event_start, start_all_day = ical_date_to_datetime(
+ event["DTSTART"].dt,
+ calendar_tz,
+ start=True,
+ )
+ event_end, end_all_day = ical_date_to_datetime(event["DTEND"].dt, calendar_tz, start=False)
+
+ # next_shifts are not stored in db so we can use User objects directly
+ shifts[f"{event_start.timestamp()}_{event['UID']}"] = {
+ "users": users,
+ "start": event_start,
+ "end": event_end,
+ "all_day": start_all_day,
+ "priority": priority + min_priority, # increase priority for overrides
+ "priority_increased_by": min_priority,
+ }
+
+ return shifts
+
+
+def recalculate_shifts_with_respect_to_priority(shifts, users=None):
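+    """
+    Resolve overlaps between shifts of different priorities: a higher-priority
+    shift truncates, splits, or completely swallows an overlapping lower-priority
+    one. The outer loop restarts whenever a shift is split, until no overlaps
+    remain. Mutates shifts (and users, if given) in place.
+    """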
+ flag = True
+ while flag:
+ splitted_shifts = {}
+ uids_to_pop = set()
+ splitted = False
+ flag = False
+ for outer_k, outer_shift in shifts.items():
+ if not splitted:
+ for inner_k, inner_shift in shifts.items():
+ if outer_k == inner_k:
+ continue
+ else:
+ if outer_shift.get("priority", 0) > inner_shift.get("priority", 0):
+ if outer_shift["start"] > inner_shift["start"] and outer_shift["end"] < inner_shift["end"]:
+ new_uid_r = f"{inner_k}-split-r"
+ new_uid_l = f"{inner_k}-split-l"
+ splitted_shift_left = copy(inner_shift)
+ splitted_shift_right = copy(inner_shift)
+ splitted_shift_left["end"] = outer_shift["start"]
+ splitted_shift_right["start"] = outer_shift["end"]
+ splitted_shift_left["all_day"] = False
+ splitted_shift_right["all_day"] = False
+ splitted_shifts[new_uid_l] = splitted_shift_left
+ splitted_shifts[new_uid_r] = splitted_shift_right
+ uids_to_pop.add(inner_k)
+ if users is not None:
+ users[new_uid_l] = users[inner_k]
+ users[new_uid_r] = users[inner_k]
+
+ splitted = True
+ flag = True
+ break
+ elif outer_shift["start"] <= inner_shift["start"] < outer_shift["end"] < inner_shift["end"]:
+ inner_shift["start"] = outer_shift["end"]
+ flag = True
+ elif outer_shift["end"] >= inner_shift["end"] > outer_shift["start"] > inner_shift["start"]:
+ inner_shift["end"] = outer_shift["start"]
+ flag = True
+ elif (
+ outer_shift["start"] <= inner_shift["start"]
+ and outer_shift["end"] >= inner_shift["end"]
+ ):
+ uids_to_pop.add(inner_k)
+ flag = True
+ else:
+ flag = False
+ elif outer_shift.get("priority", 0) < inner_shift.get("priority", 0):
+ if inner_shift["start"] > outer_shift["start"] and inner_shift["end"] < outer_shift["end"]:
+ new_uid_r = f"{outer_k}-split-r"
+ new_uid_l = f"{outer_k}-split-l"
+ splitted_shift_left = copy(outer_shift)
+ splitted_shift_right = copy(outer_shift)
+ splitted_shift_left["all_day"] = False
+ splitted_shift_right["all_day"] = False
+ splitted_shift_left["end"] = inner_shift["start"]
+ splitted_shift_right["start"] = inner_shift["end"]
+ splitted_shifts[new_uid_l] = splitted_shift_left
+ splitted_shifts[new_uid_r] = splitted_shift_right
+ uids_to_pop.add(outer_k)
+
+ if users is not None:
+ users[new_uid_l] = users[outer_k]
+ users[new_uid_r] = users[outer_k]
+
+ splitted = True
+ flag = True
+ break
+ elif inner_shift["start"] <= outer_shift["start"] < inner_shift["end"] < outer_shift["end"]:
+ outer_shift["start"] = inner_shift["end"]
+ flag = True
+ elif inner_shift["end"] >= outer_shift["end"] > inner_shift["start"] > outer_shift["start"]:
+ outer_shift["end"] = inner_shift["start"]
+ flag = True
+ elif (
+ inner_shift["start"] <= outer_shift["start"]
+ and inner_shift["end"] >= outer_shift["end"]
+ ):
+ uids_to_pop.add(outer_k)
+ flag = True
+ else:
+ flag = False
+ else:
+ flag = False
+ else:
+ break
+
+ shifts.update(splitted_shifts)
+ for uid in uids_to_pop:
+ shifts.pop(uid)
+
+
+@shared_dedicated_queue_retry_task()
+def notify_ical_schedule_shift(schedule_pk):
+ task_logger.info(f"Notify ical schedule shift {schedule_pk}")
+ OnCallSchedule = apps.get_model("schedules", "OnCallSchedule")
+
+ try:
+ schedule = OnCallSchedule.objects.get(
+ pk=schedule_pk, cached_ical_file_primary__isnull=False, channel__isnull=False
+ )
+ except OnCallSchedule.DoesNotExist:
+ task_logger.info(f"Trying to notify ical schedule shift for non-existing schedule {schedule_pk}")
+ return
+
+ if schedule.organization.slack_team_identity is None:
+ task_logger.info(f"Trying to notify ical schedule shift with no slack team identity {schedule_pk}")
+ return
+
+ MIN_DAYS_TO_LOOKUP_FOR_THE_END_OF_EVENT = 3
+
+ ical_changed = False
+
+ now = timezone.datetime.now(timezone.utc)
+    # get the list of iCalendars from the current iCal files. If there is more than
+    # one calendar, the primary calendar always comes first
+ current_calendars = schedule.get_icalendars()
+
+ current_shifts = {}
+ # expected current_shifts structure:
+ # {
+ # some uid: {
+ # "users": [users pks],
+ # "start": event start date,
+ # "end": event end date,
+ # "all_day": bool if event has all-day type,
+ # "priority": priority level,
+ # "priority_increased_by": min priority level of primary calendar, (for primary calendar event it is 0)
+ # },
+ # }
+
+    # The current_users dict exists because it's a bad idea to serialize User objects.
+    # Instead, users' pks are stored in the db for the shift-diff calculations.
+    # When a shift's users need to be passed along (e.g. in get_report_blocks_ical()),
+    # we look them up in current_users by shift uid and substitute them for the pks.
+ current_users = {}
+
+ overrides_priority = 0
+ for calendar in current_calendars:
+ if calendar is not None:
+ current_shifts_result, current_users_result = get_current_shifts_from_ical(
+ calendar,
+ schedule,
+ overrides_priority,
+ )
+ if overrides_priority == 0 and current_shifts_result:
+ overrides_priority = max([current_shifts_result[uid]["priority"] for uid in current_shifts_result]) + 1
+ current_shifts.update(current_shifts_result)
+ current_users.update(current_users_result)
+
+ recalculate_shifts_with_respect_to_priority(current_shifts, current_users)
+
+    # drop events that don't intersect the current time
+ drop = []
+ for uid, current_shift in current_shifts.items():
+ if not current_shift["start"] < now < current_shift["end"]:
+ drop.append(uid)
+ for item in drop:
+ current_shifts.pop(item)
+
+ is_prev_ical_diff = False
+ prev_overrides_priority = 0
+ prev_shifts = {}
+ prev_users = {}
+
+ # Get list of tuples with prev and current ical file for each calendar. If there is more than one calendar, primary
+ # calendar will be the first.
+ # example result for ical calendar:
+ # [(prev_ical_file_primary, current_ical_file_primary), (prev_ical_file_overrides, current_ical_file_overrides)]
+ # example result for calendar with custom events:
+ # [(prev_ical_file, current_ical_file)]
+ prev_and_current_ical_files = schedule.get_prev_and_current_ical_files()
+
+ for prev_ical_file, current_ical_file in prev_and_current_ical_files:
+ if prev_ical_file is not None and (
+ current_ical_file is None or not is_icals_equal(current_ical_file, prev_ical_file)
+ ):
+            # If the icals differ, compare the current events extracted from them
+ is_prev_ical_diff = True
+ prev_calendar = icalendar.Calendar.from_ical(prev_ical_file)
+
+ prev_shifts_result, prev_users_result = get_current_shifts_from_ical(
+ prev_calendar,
+ schedule,
+ prev_overrides_priority,
+ )
+ if prev_overrides_priority == 0 and prev_shifts_result:
+ prev_overrides_priority = max([prev_shifts_result[uid]["priority"] for uid in prev_shifts_result]) + 1
+
+ prev_shifts.update(prev_shifts_result)
+ prev_users.update(prev_users_result)
+
+ recalculate_shifts_with_respect_to_priority(prev_shifts, prev_users)
+
+ if is_prev_ical_diff:
+        # drop events that don't intersect the current time
+ drop = []
+ for uid, prev_shift in prev_shifts.items():
+ if not prev_shift["start"] < now < prev_shift["end"]:
+ drop.append(uid)
+ for item in drop:
+ prev_shifts.pop(item)
+
+ shift_changed, diff_uids = calculate_shift_diff(current_shifts, prev_shifts)
+
+ else:
+        # Otherwise, compare current shifts against the previously stored shifts
+ prev_shifts = json.loads(schedule.current_shifts) if not schedule.empty_oncall else {}
+        # convert datetimes that were dumped to str back to datetime so the shift diff is calculated correctly
+ str_format = "%Y-%m-%d %X%z"
+ for prev_shift in prev_shifts.values():
+ prev_shift["start"] = datetime.datetime.strptime(prev_shift["start"], str_format)
+ prev_shift["end"] = datetime.datetime.strptime(prev_shift["end"], str_format)
+
+ shift_changed, diff_uids = calculate_shift_diff(current_shifts, prev_shifts)
+
+ if shift_changed:
+ # Get only new/changed shifts to send a reminder message.
+ new_shifts = []
+ for uid in diff_uids:
+            # use copy so the original current_shifts dict, which is stored in the db, is not mutated
+ new_shift = copy(current_shifts[uid])
+            # replace users' pks with User objects to build the reminder message from the new shifts
+ new_shift["users"] = current_users[uid]
+ new_shifts.append(new_shift)
+ new_shifts = sorted(new_shifts, key=lambda shift: shift["start"])
+
+ if len(new_shifts) != 0:
+ days_to_lookup = (new_shifts[-1]["end"].date() - now.date()).days
+ days_to_lookup = max([days_to_lookup, MIN_DAYS_TO_LOOKUP_FOR_THE_END_OF_EVENT])
+ else:
+ days_to_lookup = MIN_DAYS_TO_LOOKUP_FOR_THE_END_OF_EVENT
+
+ next_shifts = {}
+ next_overrides_priority = 0
+
+ for calendar in current_calendars:
+ if calendar is not None:
+ next_shifts_result = get_next_shifts_from_ical(
+ calendar,
+ schedule,
+ next_overrides_priority,
+ days_to_lookup=days_to_lookup,
+ )
+ if next_overrides_priority == 0 and next_shifts_result:
+ next_overrides_priority = (
+ max([next_shifts_result[uid]["priority"] for uid in next_shifts_result]) + 1
+ )
+
+ next_shifts.update(next_shifts_result)
+
+ recalculate_shifts_with_respect_to_priority(next_shifts)
+
+        # drop events that have already started
+ drop = []
+ for uid, next_shift in next_shifts.items():
+ if now > next_shift["start"]:
+ drop.append(uid)
+ for item in drop:
+ next_shifts.pop(item)
+
+ next_shifts_from_ical = sorted(next_shifts.values(), key=lambda shift: shift["start"])
+
+ upcoming_shifts = []
+ # Add the earliest next_shift
+ if len(next_shifts_from_ical) > 0:
+ earliest_shift = next_shifts_from_ical[0]
+ upcoming_shifts.append(earliest_shift)
+ # Check if there are next shifts with the same start as the earliest
+ for shift in next_shifts_from_ical[1:]:
+ if shift["start"] == earliest_shift["start"]:
+ upcoming_shifts.append(shift)
+
+ empty_oncall = len(current_shifts) == 0
+ if empty_oncall:
+ schedule.empty_oncall = True
+ else:
+ schedule.empty_oncall = False
+ schedule.current_shifts = json.dumps(current_shifts, default=str)
+
+ schedule.save(update_fields=["current_shifts", "empty_oncall"])
+
+ if len(new_shifts) > 0 or empty_oncall:
+ slack_client = SlackClientWithErrorHandling(schedule.organization.slack_team_identity.bot_access_token)
+ step = scenario_step.ScenarioStep.get_step("schedules", "EditScheduleShiftNotifyStep")
+ report_blocks = step.get_report_blocks_ical(new_shifts, upcoming_shifts, schedule, empty_oncall)
+
+ if schedule.notify_oncall_shift_freq != OnCallSchedule.NotifyOnCallShiftFreq.NEVER:
+ try:
+ if ical_changed:
+ slack_client.api_call(
+ "chat.postMessage", channel=schedule.channel, text=f"Schedule {schedule.name} was changed"
+ )
+
+ slack_client.api_call(
+ "chat.postMessage",
+ channel=schedule.channel,
+ blocks=report_blocks,
+ text=f"On-call shift for schedule {schedule.name} has changed",
+ )
+ except SlackAPITokenException:
+ pass
+ except SlackAPIException as e:
+ if e.response["error"] == "channel_not_found":
+ print(e)
+ elif e.response["error"] == "is_archived":
+ print(e)
+ elif e.response["error"] == "invalid_auth":
+ print(e)
+ else:
+ raise e
diff --git a/engine/apps/alerts/tasks/notify_user.py b/engine/apps/alerts/tasks/notify_user.py
new file mode 100644
index 0000000000..05a9456f9f
--- /dev/null
+++ b/engine/apps/alerts/tasks/notify_user.py
@@ -0,0 +1,417 @@
+import random
+import time
+
+from django.apps import apps
+from django.conf import settings
+from django.db import transaction
+from django.utils import timezone
+from kombu import uuid as celery_uuid
+from push_notifications.models import APNSDevice
+
+from apps.alerts.constants import NEXT_ESCALATION_DELAY
+from apps.alerts.incident_appearance.renderers.web_renderer import AlertGroupWebRenderer
+from apps.alerts.signals import user_notification_action_triggered_signal
+from apps.base.messaging import get_messaging_backend_from_id
+from common.custom_celery_tasks import shared_dedicated_queue_retry_task
+
+from .task_logger import task_logger
+
+
+@shared_dedicated_queue_retry_task(
+ autoretry_for=(Exception,), retry_backoff=True, max_retries=1 if settings.DEBUG else None
+)
+def notify_user_task(
+ user_pk,
+ alert_group_pk,
+ previous_notification_policy_pk=None,
+ reason=None,
+ prevent_posting_to_thread=False,
+ notify_even_acknowledged=False,
+ important=False,
+ notify_anyway=False,
+):
+ UserNotificationPolicy = apps.get_model("base", "UserNotificationPolicy")
+ UserNotificationPolicyLogRecord = apps.get_model("base", "UserNotificationPolicyLogRecord")
+ User = apps.get_model("user_management", "User")
+ AlertGroup = apps.get_model("alerts", "AlertGroup")
+ UserHasNotification = apps.get_model("alerts", "UserHasNotification")
+
+ try:
+ alert_group = AlertGroup.all_objects.get(pk=alert_group_pk)
+ except AlertGroup.DoesNotExist:
+ return f"notify_user_task: alert_group {alert_group_pk} doesn't exist"
+
+ countdown = 0
+ stop_escalation = False
+ log_message = ""
+ log_record = None
+
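+    # The whole policy-step evaluation runs inside one transaction, with
+    # UserHasNotification locked to serialize concurrent chains for this
+    # user and alert group.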
+ with transaction.atomic():
+ try:
+ user = User.objects.get(pk=user_pk)
+ except User.DoesNotExist:
+ return f"notify_user_task: user {user_pk} doesn't exist"
+
+ organization = alert_group.channel.organization
+
+ if not user.is_notification_allowed:
+ task_logger.info(f"notify_user_task: user {user.pk} notification is not allowed for role {user.role}")
+ return
+
+ user_has_notification, _ = UserHasNotification.objects.get_or_create(
+ user=user,
+ alert_group=alert_group,
+ )
+
+ user_has_notification = UserHasNotification.objects.filter(pk=user_has_notification.pk).select_for_update()[0]
+
+ if previous_notification_policy_pk is None:
+ notification_policy = UserNotificationPolicy.objects.get_or_create_for_user(
+ user=user, important=important
+ ).first()
+ # Here we collect a brief overview of notification steps configured for user to send it to thread.
+ collected_steps_ids = []
+ next_notification_policy = notification_policy.next()
+ while next_notification_policy is not None:
+ if next_notification_policy.step == UserNotificationPolicy.Step.NOTIFY:
+ if next_notification_policy.notify_by not in collected_steps_ids:
+ collected_steps_ids.append(next_notification_policy.notify_by)
+ next_notification_policy = next_notification_policy.next()
+ collected_steps = ", ".join(
+ UserNotificationPolicy.NotificationChannel(step_id).label for step_id in collected_steps_ids
+ )
+ reason = ("Reason: " + reason + "\n") if reason is not None else ""
+ reason += ("Further notification plan: " + collected_steps) if len(collected_steps_ids) > 0 else ""
+ else:
+ if notify_user_task.request.id != user_has_notification.active_notification_policy_id:
+ task_logger.info(
+ f"notify_user_task: active_notification_policy_id mismatch. "
+ f"Duplication or non-active escalation triggered. "
+ f"Active: {user_has_notification.active_notification_policy_id}"
+ )
+ return
+
+ try:
+ notification_policy = UserNotificationPolicy.objects.get(pk=previous_notification_policy_pk)
+ if notification_policy.user.organization != organization:
+ notification_policy = UserNotificationPolicy.objects.get(
+ order=notification_policy.order, user=user, important=important
+ )
+ notification_policy = notification_policy.next()
+ except UserNotificationPolicy.DoesNotExist:
+ task_logger.info(
+ f"notify_user_taskLNotification policy {previous_notification_policy_pk} has been deleted"
+ )
+ return
+ reason = None
+ if notification_policy is None:
+ stop_escalation = True
+ log_record = UserNotificationPolicyLogRecord(
+ author=user,
+ type=UserNotificationPolicyLogRecord.TYPE_PERSONAL_NOTIFICATION_FINISHED,
+ notification_policy=notification_policy,
+ alert_group=alert_group,
+ slack_prevent_posting=prevent_posting_to_thread,
+ )
+ log_message += "Personal escalation exceeded"
+ else:
+ if (
+ (alert_group.acknowledged and not notify_even_acknowledged)
+ or alert_group.resolved
+ or alert_group.is_archived
+ or alert_group.wiped_at
+ or alert_group.root_alert_group
+ ):
+ return "Acknowledged, resolved, archived, attached or wiped."
+
+ if alert_group.silenced and not notify_anyway:
+ task_logger.info(
+ f"notify_user_task: skip notification user {user.pk} because alert_group {alert_group.pk} is silenced"
+ )
+ return
+
+ active_invitations_count = alert_group.invitations.filter(invitee=user, is_active=True).count()
+ if (notify_even_acknowledged or notify_anyway) and active_invitations_count == 0:
+ task_logger.info(f"notify_user_task: skip notification user {user.pk} invitation exceeded")
+ return
+
+ if notification_policy.step == UserNotificationPolicy.Step.WAIT:
+ if notification_policy.wait_delay is not None:
+ delay_in_seconds = notification_policy.wait_delay.total_seconds()
+ else:
+ delay_in_seconds = 0
+ countdown = delay_in_seconds
+ log_record = UserNotificationPolicyLogRecord(
+ author=user,
+ type=UserNotificationPolicyLogRecord.TYPE_PERSONAL_NOTIFICATION_TRIGGERED,
+ notification_policy=notification_policy,
+ alert_group=alert_group,
+ slack_prevent_posting=prevent_posting_to_thread,
+ notification_step=notification_policy.step,
+ )
+ task_logger.info(f"notify_user_task: Waiting {delay_in_seconds} to notify user {user.pk}")
+ elif notification_policy.step == UserNotificationPolicy.Step.NOTIFY:
+ user_to_be_notified_in_slack = (
+ notification_policy.notify_by == UserNotificationPolicy.NotificationChannel.SLACK
+ )
+ user_to_be_notified_in_telegram = (
+ notification_policy.notify_by == UserNotificationPolicy.NotificationChannel.TELEGRAM
+ )
+
+ if user_to_be_notified_in_slack and alert_group.notify_in_slack_enabled is False:
+ log_record = UserNotificationPolicyLogRecord(
+ author=user,
+ type=UserNotificationPolicyLogRecord.TYPE_PERSONAL_NOTIFICATION_FAILED,
+ notification_policy=notification_policy,
+ alert_group=alert_group,
+ reason=reason,
+ slack_prevent_posting=prevent_posting_to_thread,
+ notification_step=notification_policy.step,
+ notification_channel=notification_policy.notify_by,
+ notification_error_code=UserNotificationPolicyLogRecord.ERROR_NOTIFICATION_POSTING_TO_SLACK_IS_DISABLED,
+ )
+ elif user_to_be_notified_in_telegram and alert_group.notify_in_telegram_enabled is False:
+ log_record = UserNotificationPolicyLogRecord(
+ author=user,
+ type=UserNotificationPolicyLogRecord.TYPE_PERSONAL_NOTIFICATION_FAILED,
+ notification_policy=notification_policy,
+ alert_group=alert_group,
+ reason=reason,
+ slack_prevent_posting=prevent_posting_to_thread,
+ notification_step=notification_policy.step,
+ notification_channel=notification_policy.notify_by,
+ notification_error_code=UserNotificationPolicyLogRecord.ERROR_NOTIFICATION_POSTING_TO_TELEGRAM_IS_DISABLED,
+ )
+ else:
+ log_record = UserNotificationPolicyLogRecord(
+ author=user,
+ type=UserNotificationPolicyLogRecord.TYPE_PERSONAL_NOTIFICATION_TRIGGERED,
+ notification_policy=notification_policy,
+ alert_group=alert_group,
+ reason=reason,
+ slack_prevent_posting=prevent_posting_to_thread,
+ notification_step=notification_policy.step,
+ notification_channel=notification_policy.notify_by,
+ )
+ if log_record: # log_record is None if user notification policy step is unspecified
+ log_record.save()
+ if notify_user_task.request.retries == 0:
+ transaction.on_commit(lambda: send_user_notification_signal.apply_async((log_record.pk,)))
+
+ if not stop_escalation:
+ if notification_policy.step != UserNotificationPolicy.Step.WAIT:
+ transaction.on_commit(lambda: perform_notification.apply_async((log_record.pk,)))
+
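+        # Schedule the next policy step after the base escalation delay plus any
+        # WAIT-step countdown; the task id is stored so stale chains can detect
+        # that they have been superseded.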
+ delay = NEXT_ESCALATION_DELAY
+ if countdown is not None:
+ delay += countdown
+ task_id = celery_uuid()
+
+ user_has_notification.active_notification_policy_id = task_id
+ user_has_notification.save(update_fields=["active_notification_policy_id"])
+
+ transaction.on_commit(
+ lambda: notify_user_task.apply_async(
+ (user.pk, alert_group.pk, notification_policy.pk, reason),
+ {
+ "notify_even_acknowledged": notify_even_acknowledged,
+ "notify_anyway": notify_anyway,
+ "prevent_posting_to_thread": prevent_posting_to_thread,
+ },
+ countdown=delay,
+ task_id=task_id,
+ )
+ )
+
+ else:
+ user_has_notification.active_notification_policy_id = None
+ user_has_notification.save(update_fields=["active_notification_policy_id"])
+
+
+@shared_dedicated_queue_retry_task(
+ autoretry_for=(Exception,), retry_backoff=True, max_retries=1 if settings.DEBUG else None
+)
+def perform_notification(log_record_pk):
+ SMSMessage = apps.get_model("twilioapp", "SMSMessage")
+ PhoneCall = apps.get_model("twilioapp", "PhoneCall")
+ # EmailMessage = apps.get_model("sendgridapp", "EmailMessage") TODO: restore email notifications
+ UserNotificationPolicy = apps.get_model("base", "UserNotificationPolicy")
+ TelegramToUserConnector = apps.get_model("telegram", "TelegramToUserConnector")
+ UserNotificationPolicyLogRecord = apps.get_model("base", "UserNotificationPolicyLogRecord")
+ log_record = UserNotificationPolicyLogRecord.objects.get(pk=log_record_pk)
+
+ user = log_record.author
+ alert_group = log_record.alert_group
+ notification_policy = log_record.notification_policy
+ notification_channel = notification_policy.notify_by if notification_policy else None
+ if user is None or notification_policy is None:
+ UserNotificationPolicyLogRecord(
+ author=user,
+ type=UserNotificationPolicyLogRecord.TYPE_PERSONAL_NOTIFICATION_FAILED,
+ notification_policy=notification_policy,
+ reason="Expected data is missing",
+ alert_group=alert_group,
+ notification_step=notification_policy.step if notification_policy else None,
+ notification_channel=notification_channel,
+ notification_error_code=None,
+ ).save()
+ return
+
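+    # Dispatch to the configured notification channel; anything not matched
+    # below falls through to a pluggable messaging backend.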
+ if notification_channel == UserNotificationPolicy.NotificationChannel.SMS:
+ SMSMessage.send_sms(user, alert_group, notification_policy)
+
+ elif notification_channel == UserNotificationPolicy.NotificationChannel.PHONE_CALL:
+ PhoneCall.make_call(user, alert_group, notification_policy)
+
+ elif notification_channel == UserNotificationPolicy.NotificationChannel.TELEGRAM:
+ if alert_group.notify_in_telegram_enabled is True:
+ TelegramToUserConnector.notify_user(user, alert_group, notification_policy)
+
+ # TODO: restore email notifications
+ # elif notification_channel == UserNotificationPolicy.NotificationChannel.EMAIL:
+ # EmailMessage.send_incident_mail(user, alert_group, notification_policy)
+
+ elif notification_channel == UserNotificationPolicy.NotificationChannel.SLACK:
+ # TODO: refactor checking the possibility of sending a notification in slack
+ # Code below is not consistent.
+ # We check various slack reasons to skip escalation in this task, in send_slack_notification,
+ # before and after posting of slack message.
+ if alert_group.reason_to_skip_escalation == alert_group.RATE_LIMITED:
+ task_logger.debug(
+ f"send_slack_notification for alert_group {alert_group.pk} failed because of slack ratelimit."
+ )
+ UserNotificationPolicyLogRecord(
+ author=user,
+ type=UserNotificationPolicyLogRecord.TYPE_PERSONAL_NOTIFICATION_FAILED,
+ notification_policy=notification_policy,
+ reason="Slack ratelimit",
+ alert_group=alert_group,
+ notification_step=notification_policy.step,
+ notification_channel=notification_channel,
+ notification_error_code=UserNotificationPolicyLogRecord.ERROR_NOTIFICATION_IN_SLACK_RATELIMIT,
+ ).save()
+ return
+
+ if alert_group.notify_in_slack_enabled is True and not log_record.slack_prevent_posting:
+ # we cannot notify users in Slack if their team does not have Slack integration
+ if alert_group.channel.organization.slack_team_identity is None:
+ task_logger.debug(
+ f"send_slack_notification for alert_group {alert_group.pk} failed because slack team identity "
+ f"does not exist."
+ )
+ UserNotificationPolicyLogRecord(
+ author=user,
+ type=UserNotificationPolicyLogRecord.TYPE_PERSONAL_NOTIFICATION_FAILED,
+ notification_policy=notification_policy,
+ reason="Slack team identity does not exist",
+ alert_group=alert_group,
+ notification_step=notification_policy.step,
+ notification_channel=notification_channel,
+ notification_error_code=UserNotificationPolicyLogRecord.ERROR_NOTIFICATION_IN_SLACK_TOKEN_ERROR,
+ ).save()
+ return
+
+ retry_timeout_hours = 1
+ slack_message = alert_group.get_slack_message()
+ if slack_message is not None:
+ slack_message.send_slack_notification(user, alert_group, notification_policy)
+ task_logger.debug(f"Finished send_slack_notification for alert_group {alert_group.pk}.")
+ # check how much time has passed since log record was created
+ # to prevent eternal loop of restarting perform_notification task
+ elif timezone.now() < log_record.created_at + timezone.timedelta(hours=retry_timeout_hours):
+ task_logger.debug(
+ f"send_slack_notification for alert_group {alert_group.pk} failed because slack message "
+ f"does not exist. Restarting perform_notification."
+ )
+ restart_delay_seconds = 60
+ perform_notification.apply_async((log_record_pk,), countdown=restart_delay_seconds)
+ else:
+ task_logger.debug(
+ f"send_slack_notification for alert_group {alert_group.pk} failed because slack message "
+ f"after {retry_timeout_hours} hours still does not exist"
+ )
+ UserNotificationPolicyLogRecord(
+ author=user,
+ type=UserNotificationPolicyLogRecord.TYPE_PERSONAL_NOTIFICATION_FAILED,
+ notification_policy=notification_policy,
+ reason="Slack message does not exist",
+ alert_group=alert_group,
+ notification_step=notification_policy.step,
+ notification_channel=notification_channel,
+ notification_error_code=UserNotificationPolicyLogRecord.ERROR_NOTIFICATION_IN_SLACK,
+ ).save()
+
+ elif notification_channel == UserNotificationPolicy.NotificationChannel.MOBILE_PUSH_GENERAL:
+ message = f"{AlertGroupWebRenderer(alert_group).render().get('title', 'Incident')}"
+ thread_id = f"{alert_group.channel.organization.public_primary_key}:{alert_group.public_primary_key}"
+ devices_to_notify = APNSDevice.objects.filter(user_id=user.pk)
+ sounds = ["alarm.aiff", "operation.aiff"]
+ devices_to_notify.send_message(
+ message,
+ thread_id=thread_id,
+ category="USER_NEW_INCIDENT",
+ sound={"critical": 1, "name": f"{random.choice(sounds)}"},
+ extra={
+ "orgId": f"{alert_group.channel.organization.public_primary_key}",
+ "orgName": f"{alert_group.channel.organization.stack_slug}",
+ "incidentId": f"{alert_group.public_primary_key}",
+ "status": f"{alert_group.status}",
+ "interruption-level": "critical",
+ },
+ )
+
+ elif notification_channel == UserNotificationPolicy.NotificationChannel.MOBILE_PUSH_CRITICAL:
+ message = f"!!! {AlertGroupWebRenderer(alert_group).render().get('title', 'Incident')}"
+ thread_id = f"{alert_group.channel.organization.public_primary_key}:{alert_group.public_primary_key}"
+ devices_to_notify = APNSDevice.objects.filter(user_id=user.pk)
+ sounds = ["ambulance.aiff"]
+ devices_to_notify.send_message(
+ message,
+ thread_id=thread_id,
+ category="USER_NEW_INCIDENT",
+ sound={"critical": 1, "name": f"{random.choice(sounds)}"},
+ extra={
+ "orgId": f"{alert_group.channel.organization.public_primary_key}",
+ "orgName": f"{alert_group.channel.organization.stack_slug}",
+ "incidentId": f"{alert_group.public_primary_key}",
+ "status": f"{alert_group.status}",
+ "interruption-level": "critical",
+ },
+ )
+ else:
+ try:
+ backend_id = UserNotificationPolicy.NotificationChannel(notification_policy.notify_by).name
+ backend = get_messaging_backend_from_id(backend_id)
+ except ValueError:
+ backend = None
+
+ if backend is None:
+ task_logger.debug(f"notify_user failed because messaging backend is not available")
+ UserNotificationPolicyLogRecord(
+ author=user,
+ type=UserNotificationPolicyLogRecord.TYPE_PERSONAL_NOTIFICATION_FAILED,
+ notification_policy=notification_policy,
+ reason="Messaging backend not available",
+ alert_group=alert_group,
+ notification_step=notification_policy.step,
+ notification_channel=notification_channel,
+ notification_error_code=UserNotificationPolicyLogRecord.ERROR_NOTIFICATION_MESSAGING_BACKEND_ERROR,
+ ).save()
+ return
+ backend.notify_user(user, alert_group, notification_policy)
+
+
+@shared_dedicated_queue_retry_task(
+ autoretry_for=(Exception,), retry_backoff=True, max_retries=0 if settings.DEBUG else None
+)
+def send_user_notification_signal(log_record_pk):
+ start_time = time.time()
+
+ UserNotificationPolicyLogRecord = apps.get_model("base", "UserNotificationPolicyLogRecord")
+ task_logger.debug(f"LOG RECORD PK: {log_record_pk}")
+ task_logger.debug(f"LOG RECORD LAST: {UserNotificationPolicyLogRecord.objects.last()}")
+
+ log_record = UserNotificationPolicyLogRecord.objects.get(pk=log_record_pk)
+ user_notification_action_triggered_signal.send(sender=send_user_notification_signal, log_record=log_record)
+
+ task_logger.debug("--- USER SIGNAL TOOK %s seconds ---" % (time.time() - start_time))
diff --git a/engine/apps/alerts/tasks/resolve_alert_group_by_source_if_needed.py b/engine/apps/alerts/tasks/resolve_alert_group_by_source_if_needed.py
new file mode 100644
index 0000000000..43552cd77e
--- /dev/null
+++ b/engine/apps/alerts/tasks/resolve_alert_group_by_source_if_needed.py
@@ -0,0 +1,28 @@
+from django.apps import apps
+from django.conf import settings
+
+from common.custom_celery_tasks import shared_dedicated_queue_retry_task
+
+
+@shared_dedicated_queue_retry_task(
+ autoretry_for=(Exception,), retry_backoff=True, max_retries=1 if settings.DEBUG else None
+)
+def resolve_alert_group_by_source_if_needed(alert_group_pk):
+ """
+ The purpose of this task is to avoid computation-heavy check after each alert.
+ Should be delayed and invoked only for the last one.
+ """
+ AlertGroupForAlertManager = apps.get_model("alerts", "AlertGroupForAlertManager")
+ AlertForAlertManager = apps.get_model("alerts", "AlertForAlertManager")
+
+ alert_group = AlertGroupForAlertManager.all_objects.get(pk=alert_group_pk)
+
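+    # Only the most recently scheduled calculation may resolve the group; stale
+    # duplicates detect themselves by comparing celery task ids and bail out.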
+    if resolve_alert_group_by_source_if_needed.request.id != alert_group.active_resolve_calculation_id:
+ return "Resolve calculation celery ID mismatch. Duplication or non-active. Active: {}".format(
+ alert_group.active_resolve_calculation_id
+ )
+ else:
+ last_alert = AlertForAlertManager.objects.get(pk=alert_group.alerts.last().pk)
+ if alert_group.is_alert_a_resolve_signal(last_alert):
+ alert_group.resolve_by_source()
+ return f"resolved alert_group {alert_group.pk}"
diff --git a/engine/apps/alerts/tasks/resolve_alert_group_if_needed.py b/engine/apps/alerts/tasks/resolve_alert_group_if_needed.py
new file mode 100644
index 0000000000..1fb38712e7
--- /dev/null
+++ b/engine/apps/alerts/tasks/resolve_alert_group_if_needed.py
@@ -0,0 +1,31 @@
+# TODO: remove this file when all the resolve_alert_group_if_needed are processed
+# New version - apps.alerts.tasks.resolve_alert_group_by_source_if_needed.resolve_alert_group_by_source_if_needed
+
+from django.apps import apps
+from django.conf import settings
+
+from common.custom_celery_tasks import shared_dedicated_queue_retry_task
+
+
+@shared_dedicated_queue_retry_task(
+ autoretry_for=(Exception,), retry_backoff=True, max_retries=1 if settings.DEBUG else None
+)
+def resolve_alert_group_if_needed(alert_id):
+ """
+ The purpose of this task is to avoid computation-heavy check after each alert.
+ Should be delayed and invoked only for the last one.
+ """
+ AlertGroupForAlertManager = apps.get_model("alerts", "AlertGroupForAlertManager")
+ AlertForAlertManager = apps.get_model("alerts", "AlertForAlertManager")
+
+ alert = AlertForAlertManager.objects.get(pk=alert_id)
+    if resolve_alert_group_if_needed.request.id != alert.group.active_resolve_calculation_id:
+ return "Resolve calculation celery ID mismatch. Duplication or non-active. Active: {}".format(
+ alert.group.active_resolve_calculation_id
+ )
+ else:
+        # Retrieve the group again to have access to the child class methods
+ alert_group = AlertGroupForAlertManager.all_objects.get(pk=alert.group_id)
+ if alert_group.is_alert_a_resolve_signal(alert):
+ alert_group.resolve_by_source()
+ return f"resolved alert_group {alert_group.pk}"
diff --git a/engine/apps/alerts/tasks/resolve_by_last_step.py b/engine/apps/alerts/tasks/resolve_by_last_step.py
new file mode 100644
index 0000000000..17d2e9e8bf
--- /dev/null
+++ b/engine/apps/alerts/tasks/resolve_by_last_step.py
@@ -0,0 +1,13 @@
+from django.apps import apps
+from django.conf import settings
+
+from common.custom_celery_tasks import shared_dedicated_queue_retry_task
+
+
+@shared_dedicated_queue_retry_task(
+ autoretry_for=(Exception,), retry_backoff=True, max_retries=0 if settings.DEBUG else None
+)
+def resolve_by_last_step_task(alert_group_pk):
+ AlertGroup = apps.get_model("alerts", "AlertGroup")
+ alert_group = AlertGroup.all_objects.get(pk=alert_group_pk)
+ alert_group.resolve_by_last_step()
diff --git a/engine/apps/alerts/tasks/send_alert_group_signal.py b/engine/apps/alerts/tasks/send_alert_group_signal.py
new file mode 100644
index 0000000000..be632a2bbc
--- /dev/null
+++ b/engine/apps/alerts/tasks/send_alert_group_signal.py
@@ -0,0 +1,17 @@
+import time
+
+from django.conf import settings
+
+from apps.alerts.signals import alert_group_action_triggered_signal
+from common.custom_celery_tasks import shared_dedicated_queue_retry_task
+
+
+@shared_dedicated_queue_retry_task(
+ autoretry_for=(Exception,), retry_backoff=True, max_retries=0 if settings.DEBUG else None
+)
+def send_alert_group_signal(log_record_id):
+ start_time = time.time()
+
+ alert_group_action_triggered_signal.send(sender=send_alert_group_signal, log_record=log_record_id)
+
+ print("--- %s seconds ---" % (time.time() - start_time))
diff --git a/engine/apps/alerts/tasks/send_update_log_report_signal.py b/engine/apps/alerts/tasks/send_update_log_report_signal.py
new file mode 100644
index 0000000000..0a00725f34
--- /dev/null
+++ b/engine/apps/alerts/tasks/send_update_log_report_signal.py
@@ -0,0 +1,24 @@
+from django.apps import apps
+from django.conf import settings
+
+from apps.alerts.signals import alert_group_update_log_report_signal
+from common.custom_celery_tasks import shared_dedicated_queue_retry_task
+
+
+@shared_dedicated_queue_retry_task(
+ autoretry_for=(Exception,), retry_backoff=True, max_retries=1 if settings.DEBUG else None
+)
+def send_update_log_report_signal(log_record_pk=None, alert_group_pk=None):
+ AlertGroupLogRecord = apps.get_model("alerts", "AlertGroupLogRecord")
+
+ if log_record_pk and not alert_group_pk: # legacy
+ log_record = AlertGroupLogRecord.objects.get(pk=log_record_pk)
+ if log_record.type == AlertGroupLogRecord.TYPE_DELETED:
+ return
+ alert_group_pk = log_record.alert_group.pk
+
+ if alert_group_pk is not None:
+ alert_group_update_log_report_signal.send(
+ sender=send_update_log_report_signal,
+ alert_group=alert_group_pk,
+ )
diff --git a/engine/apps/alerts/tasks/send_update_postmortem_signal.py b/engine/apps/alerts/tasks/send_update_postmortem_signal.py
new file mode 100644
index 0000000000..92217e1e53
--- /dev/null
+++ b/engine/apps/alerts/tasks/send_update_postmortem_signal.py
@@ -0,0 +1,29 @@
+from django.apps import apps
+from django.conf import settings
+
+from apps.alerts.signals import alert_group_update_resolution_note_signal
+from common.custom_celery_tasks import shared_dedicated_queue_retry_task
+
+
+@shared_dedicated_queue_retry_task(
+ autoretry_for=(Exception,), retry_backoff=True, max_retries=1 if settings.DEBUG else None
+)
+def send_update_postmortem_signal(alert_group_pk, resolution_note_pk=None):
+    # Legacy task; to be removed
+ AlertGroup = apps.get_model("alerts", "AlertGroup")
+ ResolutionNote = apps.get_model("alerts", "ResolutionNote")
+
+ alert_group = AlertGroup.unarchived_objects.filter(pk=alert_group_pk).first()
+ if alert_group is None:
+ print("Sent signal to update postmortem, but alert group is archived or does not exist")
+ return
+
+ resolution_note = None
+ if resolution_note_pk is not None:
+ resolution_note = ResolutionNote.objects_with_deleted.get(pk=resolution_note_pk)
+
+ alert_group_update_resolution_note_signal.send(
+ sender=send_update_postmortem_signal,
+ alert_group=alert_group,
+ resolution_note=resolution_note,
+ )
diff --git a/engine/apps/alerts/tasks/send_update_resolution_note_signal.py b/engine/apps/alerts/tasks/send_update_resolution_note_signal.py
new file mode 100644
index 0000000000..b17dd554a6
--- /dev/null
+++ b/engine/apps/alerts/tasks/send_update_resolution_note_signal.py
@@ -0,0 +1,27 @@
+from django.apps import apps
+from django.conf import settings
+
+from apps.alerts.signals import alert_group_update_resolution_note_signal
+from common.custom_celery_tasks import shared_dedicated_queue_retry_task
+
+
+@shared_dedicated_queue_retry_task(
+ autoretry_for=(Exception,), retry_backoff=True, max_retries=1 if settings.DEBUG else None
+)
+def send_update_resolution_note_signal(alert_group_pk, resolution_note_pk):
+ """Sends a signal to update messages associated with resolution note"""
+ AlertGroup = apps.get_model("alerts", "AlertGroup")
+ ResolutionNote = apps.get_model("alerts", "ResolutionNote")
+
+ alert_group = AlertGroup.unarchived_objects.filter(pk=alert_group_pk).first()
+ if alert_group is None:
+ print("Sent signal to update resolution note, but alert group is archived or does not exist")
+ return
+
+ resolution_note = ResolutionNote.objects_with_deleted.get(pk=resolution_note_pk)
+
+ alert_group_update_resolution_note_signal.send(
+ sender=send_update_resolution_note_signal,
+ alert_group=alert_group,
+ resolution_note=resolution_note,
+ )
diff --git a/engine/apps/alerts/tasks/sync_grafana_alerting_contact_points.py b/engine/apps/alerts/tasks/sync_grafana_alerting_contact_points.py
new file mode 100644
index 0000000000..a54b9d0a3e
--- /dev/null
+++ b/engine/apps/alerts/tasks/sync_grafana_alerting_contact_points.py
@@ -0,0 +1,13 @@
+from django.apps import apps
+
+from apps.alerts.grafana_alerting_sync_manager.grafana_alerting_sync import GrafanaAlertingSyncManager
+from common.custom_celery_tasks import shared_dedicated_queue_retry_task
+
+
+@shared_dedicated_queue_retry_task(autoretry_for=(Exception,), retry_backoff=True, max_retries=10)
+def sync_grafana_alerting_contact_points(alert_receive_channel_id):
+ AlertReceiveChannel = apps.get_model("alerts", "AlertReceiveChannel")
+
+ alert_receive_channel = AlertReceiveChannel.objects_with_deleted.get(pk=alert_receive_channel_id)
+
+ GrafanaAlertingSyncManager(alert_receive_channel).sync_each_contact_point()
diff --git a/engine/apps/alerts/tasks/task_logger.py b/engine/apps/alerts/tasks/task_logger.py
new file mode 100644
index 0000000000..484e54b4d8
--- /dev/null
+++ b/engine/apps/alerts/tasks/task_logger.py
@@ -0,0 +1,6 @@
+import logging
+
+from celery.utils.log import get_task_logger
+
+task_logger = get_task_logger(__name__)
+task_logger.setLevel(logging.DEBUG)
diff --git a/engine/apps/alerts/tasks/unsilence.py b/engine/apps/alerts/tasks/unsilence.py
new file mode 100644
index 0000000000..3f1109292f
--- /dev/null
+++ b/engine/apps/alerts/tasks/unsilence.py
@@ -0,0 +1,48 @@
+from django.apps import apps
+from django.conf import settings
+from django.db import transaction
+
+from common.custom_celery_tasks import shared_dedicated_queue_retry_task
+
+from .compare_escalations import compare_escalations
+from .send_alert_group_signal import send_alert_group_signal
+from .task_logger import task_logger
+
+
+@shared_dedicated_queue_retry_task(
+ autoretry_for=(Exception,), retry_backoff=True, max_retries=1 if settings.DEBUG else None
+)
+def unsilence_task(alert_group_pk):
+ AlertGroup = apps.get_model("alerts", "AlertGroup")
+ AlertGroupLogRecord = apps.get_model("alerts", "AlertGroupLogRecord")
+ task_logger.info(f"Start unsilence_task for alert_group {alert_group_pk}")
+ with transaction.atomic():
+ try:
+            # lock the alert group row until the transaction commits
+            alert_group = AlertGroup.unarchived_objects.filter(pk=alert_group_pk).select_for_update()[0]
+ except IndexError:
+ task_logger.info(f"unsilence_task. alert_group {alert_group_pk} doesn't exist")
+ return
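+        # drop stale tasks: only the task whose id matches the stored unsilence_task_uuid may proceed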
+ if not compare_escalations(unsilence_task.request.id, alert_group.unsilence_task_uuid):
+ task_logger.info(
+                f"unsilence_task. alert_group {alert_group.pk}. ID mismatch. Active: {alert_group.unsilence_task_uuid}"
+ )
+ return
+ if alert_group.status == AlertGroup.SILENCED and alert_group.is_root_alert_group:
+ task_logger.info(f"unsilence alert_group {alert_group_pk} and start escalation if needed")
+ alert_group.un_silence()
+ alert_group.start_escalation_if_needed()
+ un_silence_log_record = AlertGroupLogRecord(
+ type=AlertGroupLogRecord.TYPE_UN_SILENCE,
+ alert_group=alert_group,
+ reason="auto unsilence",
+ )
+ un_silence_log_record.save()
+ transaction.on_commit(lambda: send_alert_group_signal.apply_async((un_silence_log_record.pk,)))
+ else:
+ task_logger.info(
+ f"Failed to unsilence alert_group {alert_group_pk}: alert_group status: {alert_group.status}, "
+ f"is root: {alert_group.is_root_alert_group}"
+ )
+ task_logger.info(f"Finish unsilence_task for alert_group {alert_group_pk}")
diff --git a/engine/apps/alerts/tasks/wipe.py b/engine/apps/alerts/tasks/wipe.py
new file mode 100644
index 0000000000..dff1bbe121
--- /dev/null
+++ b/engine/apps/alerts/tasks/wipe.py
@@ -0,0 +1,15 @@
+from django.apps import apps
+from django.conf import settings
+
+from common.custom_celery_tasks import shared_dedicated_queue_retry_task
+
+
+@shared_dedicated_queue_retry_task(
+ autoretry_for=(Exception,), retry_backoff=True, max_retries=1 if settings.DEBUG else None
+)
+def wipe(alert_group_pk, user_pk):
+    AlertGroup = apps.get_model("alerts", "AlertGroup")
+    User = apps.get_model("user_management", "User")
+    alert_group = AlertGroup.all_objects.filter(pk=alert_group_pk).first()
+    user = User.objects.filter(pk=user_pk).first()
+    if alert_group is None:
+        # nothing to wipe: the alert group no longer exists
+        return
+    alert_group.wipe_by_user(user)
diff --git a/engine/apps/alerts/terraform_renderer/__init__.py b/engine/apps/alerts/terraform_renderer/__init__.py
new file mode 100644
index 0000000000..952c0d5c6f
--- /dev/null
+++ b/engine/apps/alerts/terraform_renderer/__init__.py
@@ -0,0 +1,2 @@
+from .terraform_file_renderer import TerraformFileRenderer # noqa: F401
+from .terraform_state_renderer import TerraformStateRenderer # noqa: F401
diff --git a/engine/apps/alerts/terraform_renderer/terraform_file_renderer.py b/engine/apps/alerts/terraform_renderer/terraform_file_renderer.py
new file mode 100644
index 0000000000..4013410d83
--- /dev/null
+++ b/engine/apps/alerts/terraform_renderer/terraform_file_renderer.py
@@ -0,0 +1,782 @@
+from django.apps import apps
+from django.db.models import OuterRef, Subquery
+from django.utils.text import slugify
+
+from apps.alerts.models import EscalationPolicy
+from apps.schedules.models import OnCallScheduleCalendar, OnCallScheduleICal
+
+
+class TerraformFileRenderer:
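+    """Render an organization's OnCall configuration as Terraform HCL for the amixr provider."""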
+
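+    # The constants below are str.format() templates of HCL; literal braces are doubled as {{ }}.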
+ AMIXR_USER_DATA_TEMPLATE = '\ndata "amixr_user" "{}" {{\n username = "{}"\n}}\n'
+
+ TEAM_DATA_TEMPLATE = '\ndata "amixr_team" "{}" {{\n name = "{}"\n}}\n'
+
+ CUSTOM_ACTION_DATA_TEMPLATE = (
+ '\ndata "amixr_action" "{}" {{\n name = "{}"\n integration_id = amixr_integration.{}.id\n}}\n'
+ )
+
+ SCHEDULE_DATA_TEMPLATE = '\ndata "amixr_schedule" "{}" {{\n name = "{}"\n}}\n'
+
+ USER_GROUP_DATA_TEMPLATE = '\ndata "amixr_user_group" "{}" {{\n slack_handle = "{}"\n}}\n'
+
+ SLACK_CHANNEL_DATA_TEMPLATE = '\ndata "amixr_slack_channel" "{}" {{\n name = "{}"\n}}\n'
+
+ INTEGRATION_RESOURCE_TEMPLATE = (
+ '\nresource "amixr_integration" "{}" {{\n name = "{}"\n type = "{}"\n team_id = {}\n}}\n'
+ )
+ INTEGRATION_RESOURCE_TEMPLATE_WITH_TEMPLATES = (
+ '\nresource "amixr_integration" "{}" {{\n'
+ ' name = "{}"\n'
+ ' type = "{}"\n'
+ " team_id = {}\n"
+ " templates {}\n"
+ "}}\n"
+ )
+ ROUTE_RESOURCES_TEMPLATE = (
+ '\nresource "amixr_route" "{}" {{\n'
+ " integration_id = amixr_integration.{}.id\n"
+ " escalation_chain_id = {}.id\n"
+ ' routing_regex = "{}"\n'
+ " position = {}\n"
+ "}}\n"
+ )
+ ROUTE_RESOURCES_TEMPLATE_WITH_SLACK = (
+ '\nresource "amixr_route" "{}" {{\n'
+ " integration_id = amixr_integration.{}.id\n"
+ " escalation_chain_id = {}.id\n"
+ ' routing_regex = "{}"\n'
+ " position = {}\n"
+ " slack {{\n"
+ " channel_id = {}\n"
+ " }}\n"
+ "}}\n"
+ )
+
+ ESCALATION_CHAIN_DATA_TEMPLATE = '\ndata "amixr_escalation_chain" "{}" {{\n name = "{}"\n}}\n'
+
+ ESCALATION_CHAIN_RESOURCE_TEMPLATE = (
+ '\nresource "amixr_escalation_chain" "{}" {{\n name = "{}"\n team_id = {}\n}}\n'
+ )
+
+ ESCALATION_POLICY_TEMPLATES = {
+ EscalationPolicy.PUBLIC_STEP_CHOICES_MAP[EscalationPolicy.STEP_WAIT]: '\nresource "amixr_escalation" "{}" {{\n'
+ " escalation_chain_id = {}\n"
+ ' type = "{}"\n'
+ " duration = {}\n"
+ " position = {}\n"
+ "}}\n",
+ EscalationPolicy.PUBLIC_STEP_CHOICES_MAP[
+ EscalationPolicy.STEP_NOTIFY_MULTIPLE_USERS
+ ]: '\nresource "amixr_escalation" "{}" {{\n'
+ " escalation_chain_id = {}\n"
+ ' type = "{}"\n'
+ " important = {}\n"
+ " persons_to_notify = {}\n"
+ " position = {}\n"
+ "}}\n",
+ EscalationPolicy.PUBLIC_STEP_CHOICES_MAP[
+ EscalationPolicy.STEP_NOTIFY_USERS_QUEUE
+ ]: '\nresource "amixr_escalation" "{}" {{\n'
+ " escalation_chain_id = {}\n"
+ ' type = "{}"\n'
+ " persons_to_notify_next_each_time = {}\n"
+ " position = {}\n"
+ "}}\n",
+ EscalationPolicy.PUBLIC_STEP_CHOICES_MAP[
+ EscalationPolicy.STEP_NOTIFY_SCHEDULE
+ ]: '\nresource "amixr_escalation" "{}" {{\n'
+ " escalation_chain_id = {}\n"
+ ' type = "{}"\n'
+ " important = {}\n"
+ " notify_on_call_from_schedule = {}\n"
+ " position = {}\n"
+ "}}\n",
+ EscalationPolicy.PUBLIC_STEP_CHOICES_MAP[
+ EscalationPolicy.STEP_NOTIFY_GROUP
+ ]: '\nresource "amixr_escalation" "{}" {{\n'
+ " escalation_chain_id = {}\n"
+ ' type = "{}"\n'
+ " important = {}\n"
+ " group_to_notify = {}\n"
+ " position = {}\n"
+ "}}\n",
+ EscalationPolicy.PUBLIC_STEP_CHOICES_MAP[
+ EscalationPolicy.STEP_TRIGGER_CUSTOM_BUTTON
+ ]: '\nresource "amixr_escalation" "{}" {{\n'
+ " escalation_chain_id = {}\n"
+ ' type = "{}"\n'
+ " action_to_trigger = {}\n"
+ " position = {}\n"
+ "}}\n",
+ EscalationPolicy.PUBLIC_STEP_CHOICES_MAP[
+ EscalationPolicy.STEP_NOTIFY_IF_TIME
+ ]: '\nresource "amixr_escalation" "{}" {{\n'
+ " escalation_chain_id = {}\n"
+ ' type = "{}"\n'
+ " notify_if_time_from = {}\n"
+ " notify_if_time_to = {}\n"
+ " position = {}\n"
+ "}}\n",
+ EscalationPolicy.PUBLIC_STEP_CHOICES_MAP[
+ EscalationPolicy.STEP_NOTIFY_IF_NUM_ALERTS_IN_TIME_WINDOW
+ ]: '\nresource "amixr_escalation" "{}" {{\n'
+ " escalation_chain_id = {}\n"
+ ' type = "{}"\n'
+ " num_alerts_in_window = {}\n"
+ " num_minutes_in_window = {}\n"
+ " position = {}\n"
+ "}}\n",
+ "step_is_none": '\nresource "amixr_escalation" "{}" {{\n'
+ " escalation_chain_id = {}\n"
+ " type = null\n"
+ " position = {}\n"
+ "}}\n",
+ "other_steps": '\nresource "amixr_escalation" "{}" {{\n'
+ " escalation_chain_id = {}\n"
+ ' type = "{}"\n'
+ " position = {}\n"
+ "}}\n",
+ }
+
+ AMIXR_USERS_LIST_TEMPLATE = "[\n {}\n ]"
+ AMIXR_USERS_LIST_TEMPLATE_EMPTY = "[]"
+ ROLLING_USERS_TEMPLATE = " [{}],\n"
+ ROLLING_USERS_LIST_TEMPLATE = "[\n{} ]"
+
+ SCHEDULE_RESOURCE_TEMPLATE_ICAL = (
+ '\nresource "amixr_schedule" "{}" {{\n'
+ ' name = "{}"\n'
+ ' type = "ical"\n'
+ " team_id = {}\n"
+ " ical_url_primary = {}\n"
+ " ical_url_overrides = {}\n"
+ "{}"
+ "}}\n"
+ )
+
+ SCHEDULE_RESOURCE_TEMPLATE_CALENDAR = (
+ '\nresource "amixr_schedule" "{}" {{\n'
+ ' name = "{}"\n'
+ ' type = "calendar"\n'
+ " team_id = {}\n"
+ ' time_zone = "{}"\n'
+ "{}"
+ "}}\n"
+ )
+
+ SCHEDULE_RESOURCE_SLACK_TEMPLATE = " slack {{\n channel_id = {}\n }}\n"
+
+ ON_CALL_SHIFT_RESOURCE_TEMPLATE_RECURRENT_EVENT = (
+ '\nresource "amixr_on_call_shift" "{}" {{\n'
+ ' name = "{}"\n'
+ ' type = "{}"\n'
+ " team_id = {}\n"
+ ' start = "{}"\n'
+ " duration = {}\n"
+ " level = {}\n"
+ ' frequency = "{}"\n'
+ " interval = {}\n"
+ ' week_start = "{}"\n'
+ " by_day = {}\n"
+ " by_month = {}\n"
+ " by_monthday = {}\n"
+ " users = {}\n"
+ "}}\n"
+ )
+
+ ON_CALL_SHIFT_RESOURCE_TEMPLATE_ROLLING_USERS = (
+ '\nresource "amixr_on_call_shift" "{}" {{\n'
+ ' name = "{}"\n'
+ ' type = "{}"\n'
+ " team_id = {}\n"
+ ' start = "{}"\n'
+ " duration = {}\n"
+ " level = {}\n"
+ ' frequency = "{}"\n'
+ " interval = {}\n"
+ ' week_start = "{}"\n'
+ " by_day = {}\n"
+ " by_month = {}\n"
+ " by_monthday = {}\n"
+ " rolling_users = {}\n"
+ "}}\n"
+ )
+
+ ON_CALL_SHIFT_RESOURCE_TEMPLATE_SINGLE_EVENT = (
+ '\nresource "amixr_on_call_shift" "{}" {{\n'
+ ' name = "{}"\n'
+ ' type = "{}"\n'
+ " team_id = {}\n"
+ ' start = "{}"\n'
+ " duration = {}\n"
+ " level = {}\n"
+ " users = {}\n"
+ "}}\n"
+ )
+
+ def __init__(self, organization):
+ self.organization = organization
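+        # self.data caches rendered data-source snippets keyed by type, then by object;
+        # self.used_names tracks slugified names already assigned, to avoid collisions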
+ self.data = {}
+ self.used_names = {}
+
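+    # Usage sketch: TerraformFileRenderer(organization).render_terraform_file() returns a single
+    # HCL document with data sources first, followed by resources.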
+ def render_terraform_file(self):
+ result = self.render_resource_text()
+ data_result = self.render_data_text()
+ result = data_result + result
+ if len(result) == 0:
+            result += "There is nothing here yet. Check Settings to add an integration and come back!"
+ return result
+
+ def render_resource_text(self):
+ result = ""
+
+ result += self.render_escalation_chains_related_resources_text()
+
+ integrations_related_resources_text = self.render_integrations_related_resources_text()
+ result += integrations_related_resources_text
+
+ shifts_related_resources_text = self.render_on_call_shift_resource_text()
+ result += shifts_related_resources_text
+
+ schedules_related_resources_text = self.render_schedules_related_resources_text()
+ result += schedules_related_resources_text
+
+ return result
+
+ def render_escalation_chains_related_resources_text(self):
+ result = ""
+ escalation_chains = self.organization.escalation_chains.all()
+
+ for escalation_chain in escalation_chains:
+ resource_name = self.escape_string_for_terraform(escalation_chain.name)
+ team_name = self.render_team_name(escalation_chain.team)
+ team_name_text = f"data.amixr_team.{team_name}.id" if team_name else "null"
+ result += self.ESCALATION_CHAIN_RESOURCE_TEMPLATE.format(
+ resource_name, escalation_chain.name, team_name_text
+ )
+ result += self.render_escalation_policy_resource_text(escalation_chain, resource_name)
+
+ return result
+
+ def render_escalation_policy_resource_text(self, escalation_chain, escalation_chain_resource_name):
+ result = ""
+ escalation_policies = escalation_chain.escalation_policies.all()
+
+ escalation_chain_id = f"amixr_escalation_chain.{escalation_chain_resource_name}.id"
+
+ for num, escalation_policy in enumerate(escalation_policies, start=1):
+ escalation_name = f"escalation-{num}-{escalation_chain_resource_name}"
+ step_type = None
+
+ if escalation_policy.step is None:
+ result += TerraformFileRenderer.ESCALATION_POLICY_TEMPLATES["step_is_none"].format(
+ escalation_name,
+ escalation_chain_id,
+ escalation_policy.order,
+ )
+ else:
+ step_type = EscalationPolicy.PUBLIC_STEP_CHOICES_MAP[escalation_policy.step]
+
+ if escalation_policy.step == EscalationPolicy.STEP_WAIT:
+ wait_delay = escalation_policy.wait_delay
+ delay = int(wait_delay.total_seconds()) if wait_delay is not None else "null"
+ result += TerraformFileRenderer.ESCALATION_POLICY_TEMPLATES[step_type].format(
+ escalation_name,
+ escalation_chain_id,
+ step_type,
+ delay,
+ escalation_policy.order,
+ )
+ elif escalation_policy.step in [
+ EscalationPolicy.STEP_NOTIFY_MULTIPLE_USERS,
+ EscalationPolicy.STEP_NOTIFY_MULTIPLE_USERS_IMPORTANT,
+ EscalationPolicy.STEP_NOTIFY_USERS_QUEUE,
+ ]:
+ persons_to_notify = escalation_policy.sorted_users_queue
+ important = escalation_policy.step == EscalationPolicy.STEP_NOTIFY_MULTIPLE_USERS_IMPORTANT
+ important = "true" if important else "false"
+ rendered_persons_to_notify = self.render_amixr_users_list_text(persons_to_notify)
+
+ if escalation_policy.step == EscalationPolicy.STEP_NOTIFY_USERS_QUEUE:
+ result += TerraformFileRenderer.ESCALATION_POLICY_TEMPLATES[step_type].format(
+ escalation_name,
+ escalation_chain_id,
+ step_type,
+ rendered_persons_to_notify,
+ escalation_policy.order,
+ )
+ else:
+ result += TerraformFileRenderer.ESCALATION_POLICY_TEMPLATES[step_type].format(
+ escalation_name,
+ escalation_chain_id,
+ step_type,
+ important,
+ rendered_persons_to_notify,
+ escalation_policy.order,
+ )
+ elif escalation_policy.step in [
+ EscalationPolicy.STEP_NOTIFY_SCHEDULE,
+ EscalationPolicy.STEP_NOTIFY_SCHEDULE_IMPORTANT,
+ ]:
+ schedule = escalation_policy.notify_schedule
+ schedule_name = self.render_name(schedule, "schedules", "name")
+ schedule_text = "null"
+ if schedule is not None:
+ schedule_text = f"amixr_schedule.{schedule_name}.id"
+
+ important = escalation_policy.step == EscalationPolicy.STEP_NOTIFY_SCHEDULE_IMPORTANT
+ important = "true" if important else "false"
+
+ result += TerraformFileRenderer.ESCALATION_POLICY_TEMPLATES[step_type].format(
+ escalation_name,
+ escalation_chain_id,
+ step_type,
+ important,
+ schedule_text,
+ escalation_policy.order,
+ )
+ elif escalation_policy.step in [
+ EscalationPolicy.STEP_NOTIFY_GROUP,
+ EscalationPolicy.STEP_NOTIFY_GROUP_IMPORTANT,
+ ]:
+ user_group = escalation_policy.notify_to_group
+ user_group_name = self.render_user_group_name(user_group)
+ important = escalation_policy.step == EscalationPolicy.STEP_NOTIFY_GROUP_IMPORTANT
+ important = "true" if important else "false"
+ user_group_text = f"data.amixr_user_group.{user_group_name}.id" if user_group else "null"
+ result += TerraformFileRenderer.ESCALATION_POLICY_TEMPLATES[step_type].format(
+ escalation_name,
+ escalation_chain_id,
+ step_type,
+ important,
+ user_group_text,
+ escalation_policy.order,
+ )
+ # TODO: uncomment after custom actions refactoring
+ # elif escalation_policy.step == EscalationPolicy.STEP_TRIGGER_CUSTOM_BUTTON:
+ # custom_action = escalation_policy.custom_button_trigger
+ # custom_action_name = self.render_name(custom_action, "custom_actions", "name")
+ # self.render_custom_action_data_source(custom_action, custom_action_name, integration_resource_name)
+ # custom_action_text = f"data.amixr_action.{custom_action_name}.id" if custom_action else "null"
+ # result += TerraformFileRenderer.ESCALATION_POLICY_TEMPLATES[step_type].format(
+ # escalation_name, escalation_chain_id, step_type, custom_action_text, escalation_policy.order,
+ # )
+ elif escalation_policy.step == EscalationPolicy.STEP_NOTIFY_IF_TIME:
+ from_time = self.render_time_string(escalation_policy.from_time)
+ to_time = self.render_time_string(escalation_policy.to_time)
+ result += TerraformFileRenderer.ESCALATION_POLICY_TEMPLATES[step_type].format(
+ escalation_name,
+ escalation_chain_id,
+ step_type,
+ from_time,
+ to_time,
+ escalation_policy.order,
+ )
+ elif escalation_policy.step == EscalationPolicy.STEP_NOTIFY_IF_NUM_ALERTS_IN_TIME_WINDOW:
+ result += TerraformFileRenderer.ESCALATION_POLICY_TEMPLATES[step_type].format(
+ escalation_name,
+ escalation_chain_id,
+ step_type,
+ escalation_policy.num_alerts_in_window,
+ escalation_policy.num_minutes_in_window,
+ escalation_policy.order,
+ )
+ elif escalation_policy.step is not None:
+ result += TerraformFileRenderer.ESCALATION_POLICY_TEMPLATES["other_steps"].format(
+ escalation_name,
+ escalation_chain_id,
+ step_type,
+ escalation_policy.order,
+ )
+ return result
+
+ def render_integrations_related_resources_text(self):
+ AlertReceiveChannel = apps.get_model("alerts", "AlertReceiveChannel")
+ result = ""
+ integrations = self.organization.alert_receive_channels.all().order_by("created_at")
+ for integration in integrations:
+ integration_resource_name = self.render_name(integration, "integrations", "verbal_name")
+ team_name = self.render_team_name(integration.team)
+ team_name_text = f"data.amixr_team.{team_name}.id" if team_name else "null"
+ formatted_integration_name = self.escape_string_for_terraform(integration.verbal_name)
+ templates = self.render_integration_template(integration)
+ if templates is not None:
+ result += TerraformFileRenderer.INTEGRATION_RESOURCE_TEMPLATE_WITH_TEMPLATES.format(
+ integration_resource_name,
+ formatted_integration_name,
+ AlertReceiveChannel.INTEGRATIONS_TO_REVERSE_URL_MAP[integration.integration],
+ team_name_text,
+ templates,
+ )
+ else:
+ result += TerraformFileRenderer.INTEGRATION_RESOURCE_TEMPLATE.format(
+ integration_resource_name,
+ formatted_integration_name,
+ AlertReceiveChannel.INTEGRATIONS_TO_REVERSE_URL_MAP[integration.integration],
+ team_name_text,
+ )
+ route_text = self.render_route_resource_text(integration, integration_resource_name)
+ # render data sources for custom actions just after integration resource
+ actions_data_text = self.render_action_data_text()
+ result += actions_data_text
+
+ result += route_text
+ return result
+
+ def render_route_resource_text(self, integration, integration_resource_name):
+ SlackChannel = apps.get_model("slack", "SlackChannel")
+ slack_team_identity = self.organization.slack_team_identity
+ slack_channels_q = SlackChannel.objects.filter(
+ slack_id=OuterRef("slack_channel_id"),
+ slack_team_identity=slack_team_identity,
+ )
+ routes = integration.channel_filters.all().annotate(
+ slack_channel_name=Subquery(slack_channels_q.values("name")[:1])
+ )
+ result = ""
+ for num, route in enumerate(routes, start=1):
+ if route.is_default:
+ continue
+ route_name = f"route-{num}-{integration_resource_name}"
+
+ escalation_chain_resource_name = "amixr_escalation_chain." + self.escape_string_for_terraform(
+ route.escalation_chain.name
+ )
+
+ routing_regex = self.escape_string_for_terraform(route.filtering_term)
+ if route.slack_channel_id is not None:
+ if route.slack_channel_name is not None:
+ slack_channel_data_name = f"slack-channel-{route.slack_channel_name}"
+ slack_channel_id = f"data.amixr_slack_channel.{slack_channel_data_name}.slack_id"
+ if route.slack_channel_name not in self.data.setdefault("slack_channels", {}):
+ data_result = TerraformFileRenderer.SLACK_CHANNEL_DATA_TEMPLATE.format(
+ slack_channel_data_name,
+ route.slack_channel_name,
+ )
+ self.data["slack_channels"][route.slack_channel_name] = data_result
+ else:
+ slack_channel_id = f'"{route.slack_channel_id}"'
+ result += TerraformFileRenderer.ROUTE_RESOURCES_TEMPLATE_WITH_SLACK.format(
+ route_name,
+ integration_resource_name,
+ escalation_chain_resource_name,
+ routing_regex,
+ route.order,
+ slack_channel_id,
+ )
+ else:
+ result += TerraformFileRenderer.ROUTE_RESOURCES_TEMPLATE.format(
+ route_name,
+ integration_resource_name,
+ escalation_chain_resource_name,
+ routing_regex,
+ route.order,
+ )
+
+ return result
+
+ def render_schedules_related_resources_text(self):
+ SlackChannel = apps.get_model("slack", "SlackChannel")
+ slack_team_identity = self.organization.slack_team_identity
+ slack_channels_q = SlackChannel.objects.filter(
+ slack_id=OuterRef("channel"),
+ slack_team_identity=slack_team_identity,
+ )
+ schedules = self.organization.oncall_schedules.annotate(
+ slack_channel_name=Subquery(slack_channels_q.values("name")[:1])
+ ).order_by("pk")
+ result = ""
+ for schedule in schedules:
+ schedule_name = self.render_name(schedule, "schedules", "name")
+ formatted_schedule_name = self.escape_string_for_terraform(schedule.name)
+ team_name = self.render_team_name(schedule.team)
+ team_name_text = f"data.amixr_team.{team_name}.id" if team_name else "null"
+ slack_channel_text = ""
+ if schedule.channel is not None:
+ if schedule.slack_channel_name is not None:
+ slack_channel_data_name = f"slack-channel-{schedule.slack_channel_name}"
+ slack_channel_id = f"data.amixr_slack_channel.{slack_channel_data_name}.slack_id"
+ if schedule.slack_channel_name not in self.data.setdefault("slack_channels", {}):
+ data_result = TerraformFileRenderer.SLACK_CHANNEL_DATA_TEMPLATE.format(
+ slack_channel_data_name,
+ schedule.slack_channel_name,
+ )
+ self.data["slack_channels"][schedule.slack_channel_name] = data_result
+ else:
+ slack_channel_id = f'"{schedule.channel}"'
+
+ slack_channel_text = TerraformFileRenderer.SCHEDULE_RESOURCE_SLACK_TEMPLATE.format(slack_channel_id)
+
+ if isinstance(schedule, OnCallScheduleICal):
+ ical_url_primary = f'"{schedule.ical_url_primary}"' if schedule.ical_url_primary else "null"
+ ical_url_overrides = f'"{schedule.ical_url_overrides}"' if schedule.ical_url_overrides else "null"
+ result += TerraformFileRenderer.SCHEDULE_RESOURCE_TEMPLATE_ICAL.format(
+ schedule_name,
+ formatted_schedule_name,
+ team_name_text,
+ ical_url_primary,
+ ical_url_overrides,
+ slack_channel_text,
+ )
+
+ elif isinstance(schedule, OnCallScheduleCalendar):
+ result += TerraformFileRenderer.SCHEDULE_RESOURCE_TEMPLATE_CALENDAR.format(
+ schedule_name, formatted_schedule_name, team_name_text, schedule.time_zone, slack_channel_text
+ )
+ return result
+
+ def render_on_call_shift_resource_text(self):
+ CustomOnCallShift = apps.get_model("schedules", "CustomOnCallShift")
+ result = ""
+ on_call_shifts = self.organization.custom_on_call_shifts.all().order_by("pk")
+
+ for shift in on_call_shifts:
+ shift_name = self.render_name(shift, "on_call_shifts", "name")
+ team_name = self.render_team_name(shift.team)
+ team_name_text = f"data.amixr_team.{team_name}.id" if team_name else "null"
+ formatted_integration_name = self.escape_string_for_terraform(shift.name)
+ shift_type = CustomOnCallShift.PUBLIC_TYPE_CHOICES_MAP[shift.type]
+ start = shift.start.strftime("%Y-%m-%dT%H:%M:%S")
+ duration = int(shift.duration.total_seconds())
+ level = shift.priority_level
+ frequency = CustomOnCallShift.PUBLIC_FREQUENCY_CHOICES_MAP.get(shift.frequency)
+ interval = shift.interval or "null"
+ week_start = CustomOnCallShift.ICAL_WEEKDAY_MAP[shift.week_start]
+            by_day = self.replace_quotes(f"{shift.by_day}") if shift.by_day else "null"
+            by_month = self.replace_quotes(f"{shift.by_month}") if shift.by_month else "null"
+            by_monthday = self.replace_quotes(f"{shift.by_monthday}") if shift.by_monthday else "null"
+
+ if shift.type == CustomOnCallShift.TYPE_ROLLING_USERS_EVENT:
+ rolling_amixr_users = shift.get_rolling_users()
+ rendered_amixr_users = self.render_rolling_users_list_text(rolling_amixr_users)
+ result += TerraformFileRenderer.ON_CALL_SHIFT_RESOURCE_TEMPLATE_ROLLING_USERS.format(
+ shift_name,
+ formatted_integration_name,
+ shift_type,
+ team_name_text,
+ start,
+ duration,
+ level,
+ frequency,
+ interval,
+ week_start,
+ by_day,
+ by_month,
+ by_monthday,
+ rendered_amixr_users,
+ )
+ else:
+ amixr_users = shift.users.all()
+ rendered_amixr_users = self.render_amixr_users_list_text(amixr_users)
+
+ if shift.type == CustomOnCallShift.TYPE_SINGLE_EVENT:
+ result += TerraformFileRenderer.ON_CALL_SHIFT_RESOURCE_TEMPLATE_SINGLE_EVENT.format(
+ shift_name,
+ formatted_integration_name,
+ shift_type,
+ team_name_text,
+ start,
+ duration,
+ level,
+ rendered_amixr_users,
+ )
+ elif shift.type == CustomOnCallShift.TYPE_RECURRENT_EVENT:
+ result += TerraformFileRenderer.ON_CALL_SHIFT_RESOURCE_TEMPLATE_RECURRENT_EVENT.format(
+ shift_name,
+ formatted_integration_name,
+ shift_type,
+ team_name_text,
+ start,
+ duration,
+ level,
+ frequency,
+ interval,
+ week_start,
+ by_day,
+ by_month,
+ by_monthday,
+ rendered_amixr_users,
+ )
+ return result
+
+ def render_data_text(self):
+ result = ""
+        for _data_type, data_source in sorted(self.data.items(), key=lambda x: x[0], reverse=True):
+            for data_result in data_source.values():
+                result += data_result
+ return result
+
+ def render_action_data_text(self):
+ result = ""
+ actions = self.data.pop("custom_actions", {})
+ for action, action_text in actions.items():
+ result += action_text
+ return result
+
+ def render_names_and_data_for_amixr_users(self, amixr_users):
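+        # emit an amixr_user data source once per user (keyed by public_primary_key), reuse on later references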
+ amixr_users_names = []
+ for user in amixr_users:
+ user_data_name = self.render_name(user, "users", "username")
+ amixr_users_names.append(user_data_name)
+ if user.public_primary_key not in self.data.setdefault("users", {}):
+ data_result = TerraformFileRenderer.AMIXR_USER_DATA_TEMPLATE.format(user_data_name, user.username)
+ self.data["users"][user.public_primary_key] = data_result
+ return amixr_users_names
+
+ def render_rolling_users_list_text(self, rolling_amixr_users):
+ if rolling_amixr_users:
+ rolling_users_text = ""
+ for amixr_users in rolling_amixr_users:
+ if amixr_users:
+ amixr_users_names = self.render_names_and_data_for_amixr_users(amixr_users)
+ rendered_amixr_users = ", ".join(
+ [f"data.amixr_user.{user_verbal}.id" for user_verbal in amixr_users_names]
+ )
+ else:
+ rendered_amixr_users = TerraformFileRenderer.AMIXR_USERS_LIST_TEMPLATE_EMPTY
+ rolling_users_text += TerraformFileRenderer.ROLLING_USERS_TEMPLATE.format(rendered_amixr_users)
+ rendered_rolling_users = TerraformFileRenderer.ROLLING_USERS_LIST_TEMPLATE.format(rolling_users_text)
+ else:
+ rendered_rolling_users = TerraformFileRenderer.AMIXR_USERS_LIST_TEMPLATE_EMPTY
+ return rendered_rolling_users
+
+ def render_amixr_users_list_text(self, amixr_users):
+ if amixr_users:
+ amixr_users_names = self.render_names_and_data_for_amixr_users(amixr_users)
+ rendered_amixr_users = TerraformFileRenderer.AMIXR_USERS_LIST_TEMPLATE.format(
+ ", ".join([f"data.amixr_user.{user_verbal}.id" for user_verbal in amixr_users_names])
+ )
+ else:
+ rendered_amixr_users = TerraformFileRenderer.AMIXR_USERS_LIST_TEMPLATE_EMPTY
+ return rendered_amixr_users
+
+ def render_name(self, obj, obj_verbal, name_attr):
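+        """Return a slugified name for obj, suffixed with -2, -3, ... until unique within obj_verbal."""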
+ if obj is None:
+ return None
+ obj_name = slugify(getattr(obj, name_attr))
+ obj_data_name = obj_name
+ counter = 1
+ while (
+ obj_data_name in self.used_names.setdefault(obj_verbal, {})
+ and self.used_names[obj_verbal][obj_data_name] != obj.pk
+ ):
+ counter += 1
+ obj_data_name = f"{obj_name}-{counter}"
+ self.used_names[obj_verbal][obj_data_name] = obj.pk
+ return obj_data_name
+
+ def render_schedule_data_source(self, schedule, schedule_data_name):
+ if schedule is None:
+ return None
+ if schedule.pk not in self.data.setdefault("schedules", {}):
+ formatted_schedule_name = self.escape_string_for_terraform(schedule.name)
+ data_result = TerraformFileRenderer.SCHEDULE_DATA_TEMPLATE.format(
+ schedule_data_name, formatted_schedule_name
+ )
+ self.data["schedules"][schedule.pk] = data_result
+
+ def render_user_group_name(self, user_group):
+ if user_group is None:
+ return None
+ user_group_data_name = slugify(user_group.handle)
+ if user_group.pk not in self.data.setdefault("user_groups", {}):
+ data_result = TerraformFileRenderer.USER_GROUP_DATA_TEMPLATE.format(user_group_data_name, user_group.handle)
+ self.data["user_groups"][user_group.pk] = data_result
+ return user_group_data_name
+
+ def render_team_name(self, team):
+ if team is None:
+ return None
+
+ team_data_name = slugify(team.name)
+ if team.pk not in self.data.setdefault("teams", {}):
+ data_result = TerraformFileRenderer.TEAM_DATA_TEMPLATE.format(team_data_name, team.name)
+ self.data["teams"][team.pk] = data_result
+ return team_data_name
+
+ def render_custom_action_data_source(self, custom_action, custom_action_data_name, integration_resource_name):
+ if custom_action is None:
+ return None
+ if custom_action.pk not in self.data.setdefault("custom_actions", {}):
+ formatted_action_name = self.escape_string_for_terraform(custom_action.name)
+ data_result = TerraformFileRenderer.CUSTOM_ACTION_DATA_TEMPLATE.format(
+ custom_action_data_name,
+ formatted_action_name,
+ integration_resource_name,
+ )
+ self.data["custom_actions"][custom_action.pk] = data_result
+
+ def render_integration_template(self, integration):
+ result_template = None
+ slack_template = self.render_integration_slack_template(integration)
+ template_fields = {
+ "resolve_signal": integration.resolve_condition_template,
+ "grouping_key": integration.grouping_id_template,
+ }
+
+ templates = {}
+ for template_type, template in template_fields.items():
+ if template is not None:
+ if "\n" in template:
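+                    # multi-line templates are rendered as HCL heredocs (<<-EOT ... EOT)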
+ result = ""
+ template_lines = template.split("\n")
+ for line in template_lines:
+ result += f" {line}\n"
+                    result = "<<-EOT\n{} EOT".format(result)
+ templates[template_type] = result
+ else:
+ template = self.escape_string_for_terraform(template)
+ templates[template_type] = f'"{template}"'
+
+ if len(templates) > 0 or slack_template:
+ result_template = "{\n"
+ for template_type, template in templates.items():
+ result_template += f" {template_type} = {template}\n"
+ if slack_template:
+ result_template += f" slack {slack_template}"
+ result_template += " }"
+
+ return result_template
+
+ def render_integration_slack_template(self, integration):
+ slack_template = None
+ slack_fields = {
+ "title": integration.slack_title_template,
+ "message": integration.slack_message_template,
+ "image_url": integration.slack_image_url_template,
+ }
+ slack_templates = {}
+ for template_type, template in slack_fields.items():
+ if template is not None:
+ if "\n" in template:
+ result = ""
+ template_lines = template.split("\n")
+ for line in template_lines:
+ result += f" {line}\n"
+                    result = "<<-EOT\n{} EOT".format(result)
+ slack_templates[template_type] = result
+ else:
+ template = self.escape_string_for_terraform(template)
+ slack_templates[template_type] = f'"{template}"'
+
+ if len(slack_templates) > 0:
+ slack_template = "{\n"
+ for template_type, template in slack_templates.items():
+ slack_template += f" {template_type} = {template}\n"
+ slack_template += " }\n"
+
+ return slack_template
+
+ def render_time_string(self, time_obj):
+ result = f"\"{time_obj.strftime('%H:%M:%SZ')}\"" if time_obj is not None else "null"
+ return result
+
+ def escape_string_for_terraform(self, template_line):
+ template_line = template_line.replace("\\", "\\\\")
+ template_line = template_line.replace('"', r"\"")
+ return template_line
+
+ def replace_quotes(self, template_line):
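+        # turn Python list reprs like "['MO', 'TU']" into HCL-style lists ["MO", "TU"]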
+ template_line = template_line.replace("'", r'"')
+ return template_line
diff --git a/engine/apps/alerts/terraform_renderer/terraform_state_renderer.py b/engine/apps/alerts/terraform_renderer/terraform_state_renderer.py
new file mode 100644
index 0000000000..d8770d8d4c
--- /dev/null
+++ b/engine/apps/alerts/terraform_renderer/terraform_state_renderer.py
@@ -0,0 +1,122 @@
+from django.utils.text import slugify
+
+from apps.schedules.models import OnCallScheduleCalendar
+
+
+class TerraformStateRenderer:
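+    """Render `terraform import` commands for an organization's existing OnCall resources."""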
+
+ STATE_INTEGRATION_TEMPLATE = "terraform import amixr_integration.{} {}\n"
+ STATE_ROUTE_TEMPLATE = "terraform import amixr_route.{} {}\n"
+ STATE_ESCALATION_CHAIN_TEMPLATE = "terraform import amixr_escalation_chain.{} {}\n"
+ STATE_ESCALATION_TEMPLATE = "terraform import amixr_escalation.{} {}\n"
+ STATE_SCHEDULE_TEMPLATE = "terraform import amixr_schedule.{} {}\n"
+ STATE_ON_CALL_SHIFT_TEMPLATE = "terraform import amixr_on_call_shift.{} {}\n"
+
+ def __init__(self, organization):
+ self.organization = organization
+ self.used_names = {}
+
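+    # Usage sketch: TerraformStateRenderer(organization).render_state() returns one
+    # "terraform import <resource>.<name> <public_primary_key>" line per resource.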
+ def render_state(self):
+ result = self.render_state_text()
+ if len(result) == 0:
+            result += "There is nothing here yet. Check Settings to add an integration and come back!"
+ return result
+
+ def render_state_text(self):
+ result = ""
+
+ result += self.render_escalation_chains_related_states_text()
+
+ integrations_related_states_text = self.render_integrations_related_states_text()
+ result += integrations_related_states_text
+
+ schedules_related_states_text = self.render_schedule_related_states_text()
+ result += schedules_related_states_text
+
+ return result
+
+ def render_escalation_chains_related_states_text(self):
+ result = ""
+ escalation_chains = self.organization.escalation_chains.all()
+ for escalation_chain in escalation_chains:
+ resource_name = self.render_name(escalation_chain, "escalation_chains", "name")
+ result += self.STATE_ESCALATION_CHAIN_TEMPLATE.format(resource_name, escalation_chain.public_primary_key)
+
+ result += self.render_escalation_policy_state_text(escalation_chain, resource_name)
+ return result
+
+ def render_integrations_related_states_text(self):
+ result = ""
+ integrations = self.organization.alert_receive_channels.all().order_by("created_at")
+ for integration in integrations:
+ integration_resource_name = self.render_name(integration, "integrations", "verbal_name")
+ result += TerraformStateRenderer.STATE_INTEGRATION_TEMPLATE.format(
+ integration_resource_name,
+ integration.public_primary_key,
+ )
+ route_text = self.render_route_state_text(integration, integration_resource_name)
+ result += route_text
+ return result
+
+ def render_schedule_related_states_text(self):
+ result = ""
+        schedules = self.organization.oncall_schedules.order_by("pk")
+        for schedule in schedules:
+ schedule_resource_name = self.render_name(schedule, "schedules", "name")
+ result += TerraformStateRenderer.STATE_SCHEDULE_TEMPLATE.format(
+ schedule_resource_name,
+ schedule.public_primary_key,
+ )
+ if isinstance(schedule, OnCallScheduleCalendar):
+ on_call_shifts_text = self.render_on_call_shift_state_text(schedule)
+ result += on_call_shifts_text
+ return result
+
+ def render_route_state_text(self, integration, integration_resource_name):
+ routes = integration.channel_filters.all()
+ result = ""
+ for num, route in enumerate(routes, start=1):
+ if route.is_default:
+ continue
+ route_name = f"route-{num}-{integration_resource_name}"
+ result += TerraformStateRenderer.STATE_ROUTE_TEMPLATE.format(
+ route_name,
+ route.public_primary_key,
+ )
+
+ return result
+
+ def render_escalation_policy_state_text(self, escalation_chain, escalation_chain_resource_name):
+ result = ""
+ escalation_policies = escalation_chain.escalation_policies.all()
+ for num, escalation_policy in enumerate(escalation_policies, start=1):
+ escalation_name = f"escalation-{num}-{escalation_chain_resource_name}"
+ result += TerraformStateRenderer.STATE_ESCALATION_TEMPLATE.format(
+ escalation_name,
+ escalation_policy.public_primary_key,
+ )
+ return result
+
+ def render_on_call_shift_state_text(self, schedule):
+ result = ""
+ on_call_shifts = schedule.custom_on_call_shifts.all().order_by("pk")
+ for shift in on_call_shifts:
+ shift_name = self.render_name(shift, "on_call_shifts", "name")
+ result += TerraformStateRenderer.STATE_ON_CALL_SHIFT_TEMPLATE.format(
+ shift_name,
+ shift.public_primary_key,
+ )
+ return result
+
+ def render_name(self, obj, obj_verbal, name_attr):
+ obj_name = slugify(getattr(obj, name_attr))
+ obj_data_name = obj_name
+ counter = 1
+ while (
+ obj_data_name in self.used_names.setdefault(obj_verbal, {})
+ and self.used_names[obj_verbal][obj_data_name] != obj.pk
+ ):
+ counter += 1
+ obj_data_name = f"{obj_name}-{counter}"
+ self.used_names[obj_verbal][obj_data_name] = obj.pk
+ return obj_data_name
diff --git a/engine/apps/alerts/tests/__init__.py b/engine/apps/alerts/tests/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/engine/apps/alerts/tests/conftest.py b/engine/apps/alerts/tests/conftest.py
new file mode 100644
index 0000000000..2550fdef01
--- /dev/null
+++ b/engine/apps/alerts/tests/conftest.py
@@ -0,0 +1,11 @@
+import pytest
+
+from apps.alerts.incident_appearance.templaters import AlertSlackTemplater
+
+
+@pytest.fixture()
+def mock_alert_renderer_render_for(monkeypatch):
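+    # patch _render_for to return a key for which no template is defined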
+ def mock_render_for(*args, **kwargs):
+ return "invalid_render_for"
+
+ monkeypatch.setattr(AlertSlackTemplater, "_render_for", mock_render_for)
diff --git a/engine/apps/alerts/tests/factories.py b/engine/apps/alerts/tests/factories.py
new file mode 100644
index 0000000000..8888c0f016
--- /dev/null
+++ b/engine/apps/alerts/tests/factories.py
@@ -0,0 +1,84 @@
+import factory
+
+from apps.alerts.models import (
+ Alert,
+ AlertGroup,
+ AlertGroupLogRecord,
+ AlertReceiveChannel,
+ ChannelFilter,
+ CustomButton,
+ EscalationChain,
+ EscalationPolicy,
+ Invitation,
+ ResolutionNote,
+ ResolutionNoteSlackMessage,
+)
+from common.utils import UniqueFaker
+
+
+class AlertReceiveChannelFactory(factory.DjangoModelFactory):
+
+ # integration = AlertReceiveChannel.INTEGRATION_GRAFANA
+ verbal_name = factory.Faker("sentence", nb_words=2)
+
+ class Meta:
+ model = AlertReceiveChannel
+
+
+class ChannelFilterFactory(factory.DjangoModelFactory):
+ class Meta:
+ model = ChannelFilter
+
+
+class EscalationChainFactory(factory.DjangoModelFactory):
+ name = UniqueFaker("word")
+
+ class Meta:
+ model = EscalationChain
+
+
+class EscalationPolicyFactory(factory.DjangoModelFactory):
+ class Meta:
+ model = EscalationPolicy
+
+
+class AlertFactory(factory.DjangoModelFactory):
+ class Meta:
+ model = Alert
+
+
+class AlertGroupFactory(factory.DjangoModelFactory):
+ class Meta:
+ model = AlertGroup
+
+
+class AlertGroupLogRecordFactory(factory.DjangoModelFactory):
+ class Meta:
+ model = AlertGroupLogRecord
+
+
+class ResolutionNoteFactory(factory.DjangoModelFactory):
+
+ message_text = factory.Faker("sentence", nb_words=5)
+
+ class Meta:
+ model = ResolutionNote
+
+
+class ResolutionNoteSlackMessageFactory(factory.DjangoModelFactory):
+ class Meta:
+ model = ResolutionNoteSlackMessage
+
+
+class CustomActionFactory(factory.DjangoModelFactory):
+
+ webhook = factory.Faker("url")
+ name = UniqueFaker("word")
+
+ class Meta:
+ model = CustomButton
+
+
+class InvitationFactory(factory.DjangoModelFactory):
+ class Meta:
+ model = Invitation
diff --git a/engine/apps/alerts/tests/test_alert_group.py b/engine/apps/alerts/tests/test_alert_group.py
new file mode 100644
index 0000000000..16b3daf32a
--- /dev/null
+++ b/engine/apps/alerts/tests/test_alert_group.py
@@ -0,0 +1,97 @@
+import pytest
+
+from apps.alerts.incident_appearance.renderers.phone_call_renderer import AlertGroupPhoneCallRenderer
+from apps.alerts.models import AlertGroup
+from apps.alerts.tasks.delete_alert_group import delete_alert_group
+from apps.slack.models import SlackMessage
+from common.constants.role import Role
+
+
+@pytest.mark.django_db
+def test_render_for_phone_call(
+ make_organization_with_slack_team_identity,
+ make_alert_receive_channel,
+ make_alert_group,
+ make_alert,
+):
+ organization, slack_team_identity = make_organization_with_slack_team_identity()
+ alert_receive_channel = make_alert_receive_channel(organization, integration_slack_channel_id="CWER1ASD")
+
+ alert_group = make_alert_group(alert_receive_channel)
+ SlackMessage.objects.create(channel_id="CWER1ASD", alert_group=alert_group)
+
+ alert_group = make_alert_group(alert_receive_channel)
+
+ make_alert(
+ alert_group,
+ raw_request_data={
+ "status": "firing",
+ "labels": {
+ "alertname": "TestAlert",
+ "region": "eu-1",
+ },
+ "annotations": {},
+ "startsAt": "2018-12-25T15:47:47.377363608Z",
+ "endsAt": "0001-01-01T00:00:00Z",
+ "generatorURL": "",
+ "amixr_demo": True,
+ },
+ )
+
+ expected_verbose_name = (
+ f"You are invited to check an incident from Grafana OnCall. "
+ f"Alert via {alert_receive_channel.verbal_name} - Grafana with title TestAlert triggered 1 times"
+ )
+ rendered_text = AlertGroupPhoneCallRenderer(alert_group).render()
+ assert expected_verbose_name in rendered_text
+
+
+@pytest.mark.django_db
+def test_delete(
+ make_organization_with_slack_team_identity,
+ make_user,
+ make_slack_channel,
+ make_alert_receive_channel,
+ make_alert_group,
+ make_alert,
+):
+    """Test alert group deletion."""
+
+ organization, slack_team_identity = make_organization_with_slack_team_identity()
+ slack_channel = make_slack_channel(slack_team_identity, name="general", slack_id="CWER1ASD")
+ user = make_user(organization=organization, role=Role.ADMIN)
+
+ alert_receive_channel = make_alert_receive_channel(organization, integration_slack_channel_id="CWER1ASD")
+
+ alert_group = make_alert_group(alert_receive_channel)
+ SlackMessage.objects.create(channel_id="CWER1ASD", alert_group=alert_group)
+
+ make_alert(
+ alert_group,
+ raw_request_data={
+ "evalMatches": [
+ {"value": 100, "metric": "High value", "tags": None},
+ {"value": 200, "metric": "Higher Value", "tags": None},
+ ],
+ "message": "Someone is testing the alert notification within grafana.",
+ "ruleId": 0,
+ "ruleName": "Test notification",
+ "ruleUrl": "http://localhost:3000/",
+ "state": "alerting",
+ "title": f"Incident for channel <#{slack_channel.slack_id}> Where a > b & c < d",
+ },
+ )
+
+ alerts = alert_group.alerts
+ slack_messages = alert_group.slack_messages
+
+ assert alerts.count() > 0
+ assert slack_messages.count() > 0
+
+ delete_alert_group(alert_group.pk, user.pk)
+
+ assert alerts.count() == 0
+ assert slack_messages.count() == 0
+
+ with pytest.raises(AlertGroup.DoesNotExist):
+ alert_group.refresh_from_db()
diff --git a/engine/apps/alerts/tests/test_alert_group_renderer.py b/engine/apps/alerts/tests/test_alert_group_renderer.py
new file mode 100644
index 0000000000..5253832ece
--- /dev/null
+++ b/engine/apps/alerts/tests/test_alert_group_renderer.py
@@ -0,0 +1,145 @@
+import pytest
+
+from apps.alerts.incident_appearance.templaters import AlertSlackTemplater
+from apps.alerts.models import AlertGroup
+from apps.integrations.metadata.configuration import grafana
+
+
+@pytest.mark.django_db
+def test_render_alert(
+ make_organization_and_user_with_slack_identities,
+ make_alert_receive_channel,
+ make_alert_group,
+ make_alert,
+):
+ organization, _, _, _ = make_organization_and_user_with_slack_identities()
+ alert_receive_channel = make_alert_receive_channel(
+ organization,
+ )
+ alert_group = make_alert_group(alert_receive_channel)
+
+ alert = make_alert(alert_group=alert_group, raw_request_data=grafana.tests["payload"])
+
+ templater = AlertSlackTemplater(alert)
+ templated_alert = templater.render()
+ assert templated_alert.title == grafana.tests["slack"]["title"].format(
+ web_link=alert_group.web_link,
+ integration_name=alert_receive_channel.verbal_name,
+ )
+ assert templated_alert.message == grafana.tests["slack"]["message"]
+ assert templated_alert.image_url == grafana.tests["slack"]["image_url"]
+
+
+@pytest.mark.django_db
+def test_getattr_template(
+ make_organization_and_user_with_slack_identities,
+ make_alert_receive_channel,
+ make_alert_group,
+ make_alert,
+):
+ slack_title_template = "Incident"
+ organization, _, _, _ = make_organization_and_user_with_slack_identities()
+ alert_receive_channel = make_alert_receive_channel(organization, slack_title_template=slack_title_template)
+ alert_group = make_alert_group(alert_receive_channel)
+
+ alert = make_alert(alert_group=alert_group, raw_request_data={})
+
+ renderer = AlertSlackTemplater(alert)
+ template = renderer.template_manager.get_attr_template("title", alert_receive_channel, render_for="slack")
+ assert template == slack_title_template
+
+
+@pytest.mark.django_db
+def test_getattr_template_with_no_template(
+ make_organization_and_user_with_slack_identities,
+ make_alert_receive_channel,
+ make_alert_group,
+ make_alert,
+):
+ organization, _, _, _ = make_organization_and_user_with_slack_identities()
+ alert_receive_channel = make_alert_receive_channel(
+ organization,
+ )
+ alert_group = make_alert_group(alert_receive_channel)
+
+ alert = make_alert(alert_group=alert_group, raw_request_data={})
+
+ renderer = AlertSlackTemplater(alert)
+ template = renderer.template_manager.get_attr_template("title", alert_receive_channel, render_for="slack")
+ assert template == grafana.slack_title
+
+
+@pytest.mark.django_db
+def test_getdefault_attr_template_non_existing(
+ make_organization_and_user_with_slack_identities,
+ make_alert_receive_channel,
+ make_alert_group,
+ make_alert,
+):
+ organization, _, _, _ = make_organization_and_user_with_slack_identities()
+ alert_receive_channel = make_alert_receive_channel(
+ organization,
+ )
+ alert_group = make_alert_group(alert_receive_channel)
+
+ alert = make_alert(alert_group=alert_group, raw_request_data={})
+
+ renderer = AlertSlackTemplater(alert)
+ default_template = renderer.template_manager.get_default_attr_template(
+ "title", alert_receive_channel, render_for="invalid_render_for"
+ )
+ assert default_template is None
+
+
+@pytest.mark.django_db
+@pytest.mark.parametrize(
+ "source,expected_text",
+ [
+ (AlertGroup.SOURCE, "Acknowledged by alert source"),
+ (AlertGroup.USER, "Acknowledged by {username}"),
+ ],
+)
+def test_get_acknowledge_text(
+ make_organization_and_user_with_slack_identities,
+ make_alert_receive_channel,
+ make_alert_group,
+ source,
+ expected_text,
+):
+ organization, user, _, _ = make_organization_and_user_with_slack_identities()
+ alert_receive_channel = make_alert_receive_channel(organization)
+ alert_group = make_alert_group(alert_receive_channel)
+
+ alert_group.acknowledge(acknowledged_by=source, acknowledged_by_user=user)
+
+ assert alert_group.get_acknowledge_text() == expected_text.format(
+ username=user.get_user_verbal_for_team_for_slack()
+ )
+
+
+@pytest.mark.django_db
+@pytest.mark.parametrize(
+ "source,expected_text",
+ [
+ (AlertGroup.SOURCE, "Resolved by alert source"),
+ (AlertGroup.ARCHIVED, "Resolved because alert has been archived"),
+ (AlertGroup.LAST_STEP, "Resolved automatically"),
+ (AlertGroup.WIPED, "Resolved by wipe"),
+ (AlertGroup.DISABLE_MAINTENANCE, "Resolved by stop maintenance"),
+ (AlertGroup.USER, "Resolved by {username}"),
+ ],
+)
+def test_get_resolved_text(
+ make_organization_and_user_with_slack_identities,
+ make_alert_receive_channel,
+ make_alert_group,
+ source,
+ expected_text,
+):
+ organization, user, _, _ = make_organization_and_user_with_slack_identities()
+ alert_receive_channel = make_alert_receive_channel(organization)
+ alert_group = make_alert_group(alert_receive_channel)
+
+ alert_group.resolve(resolved_by=source, resolved_by_user=user)
+
+ assert alert_group.get_resolve_text() == expected_text.format(username=user.get_user_verbal_for_team_for_slack())
diff --git a/engine/apps/alerts/tests/test_alert_manager.py b/engine/apps/alerts/tests/test_alert_manager.py
new file mode 100644
index 0000000000..b78ef0df2b
--- /dev/null
+++ b/engine/apps/alerts/tests/test_alert_manager.py
@@ -0,0 +1,135 @@
+import pytest
+
+from apps.alerts.models.alert_manager_models import AlertForAlertManager, AlertGroupForAlertManager
+
+
+@pytest.mark.django_db
+def test_hash_based_on_labels():
+ alert_a = AlertForAlertManager(
+ raw_request_data={
+ "status": "firing",
+ "labels": {
+ "alertname": "TestAlert",
+ "region": "eu-1",
+ },
+ "annotations": {},
+ "startsAt": "2018-12-25T15:47:47.377363608Z",
+ "endsAt": "0001-01-01T00:00:00Z",
+ "generatorURL": "",
+ }
+ )
+
+ alert_b = AlertForAlertManager(
+ raw_request_data={
+ "status": "firing",
+ "labels": {
+ "alertname": "TestAlert",
+ "region": "us-east",
+ },
+ "annotations": {},
+ "startsAt": "2018-12-25T15:47:47.377363608Z",
+ "endsAt": "0001-01-01T00:00:00Z",
+ "generatorURL": "",
+ }
+ )
+
+ assert alert_a.get_integration_optimization_hash() != alert_b.get_integration_optimization_hash()
+
+ alert_c = AlertForAlertManager(
+ raw_request_data={
+ "status": "firing",
+ "labels": {
+ "alertname": "TestAlert",
+ "region": "us-east",
+ },
+ "annotations": {},
+ "startsAt": "2018-12-25T15:47:47.377363608Z",
+ "endsAt": "another time",
+ "generatorURL": "",
+ }
+ )
+
+ assert alert_b.get_integration_optimization_hash() == alert_c.get_integration_optimization_hash()
+
+
+@pytest.mark.django_db
+def test_alert_group_resolved(
+ make_organization, make_slack_team_identity, make_escalation_chain, make_channel_filter, make_alert_receive_channel
+):
+ team_identity = make_slack_team_identity()
+ organization = make_organization(slack_team_identity=team_identity)
+
+ alert_receive_channel = make_alert_receive_channel(organization)
+ escalation_chain = make_escalation_chain(organization)
+ make_channel_filter(alert_receive_channel, escalation_chain=escalation_chain)
+
+ alert_group = AlertGroupForAlertManager(channel=alert_receive_channel)
+ alert_group.save()
+
+ alert_a = AlertForAlertManager(
+ raw_request_data={
+ "status": "firing",
+ "labels": {
+ "alertname": "TestAlert",
+ "region": "eu-1",
+ },
+ "annotations": {},
+ "startsAt": "2018-12-25T15:47:47.377363608Z",
+ "endsAt": "0001-01-01T00:00:00Z",
+ "generatorURL": "",
+ },
+ group=alert_group,
+ )
+ alert_a.save()
+
+ alert_b = AlertForAlertManager(
+ raw_request_data={
+ "status": "firing",
+ "labels": {
+ "alertname": "TestAlert",
+ "region": "us-east",
+ },
+ "annotations": {},
+ "startsAt": "2018-12-25T15:47:47.377363608Z",
+ "endsAt": "0001-01-01T00:00:00Z",
+ "generatorURL": "",
+ },
+ group=alert_group,
+ )
+ alert_b.save()
+
+ alert_a_resolve = AlertForAlertManager(
+ raw_request_data={
+ "status": "resolved",
+ "labels": {
+ "region": "eu-1",
+ "alertname": "TestAlert",
+ },
+ "annotations": {},
+ "startsAt": "2018-12-25T15:47:47.377363608Z",
+ "endsAt": "another time",
+ "generatorURL": "",
+ },
+ group=alert_group,
+ )
+
+ assert alert_group.is_alert_a_resolve_signal(alert_a_resolve) is False
+ alert_a_resolve.save()
+
+ alert_b_resolve = AlertForAlertManager(
+ raw_request_data={
+ "status": "resolved",
+ "labels": {
+ "alertname": "TestAlert",
+ "region": "us-east",
+ },
+ "annotations": {},
+ "startsAt": "2018-12-25T15:47:47.377363608Z",
+ "endsAt": "0001-01-01T00:00:00Z",
+ "generatorURL": "",
+ },
+ group=alert_group,
+ )
+ alert_b_resolve.save()
+
+ assert alert_group.is_alert_a_resolve_signal(alert_b_resolve) is True
diff --git a/engine/apps/alerts/tests/test_alert_receiver_channel.py b/engine/apps/alerts/tests/test_alert_receiver_channel.py
new file mode 100644
index 0000000000..b8b70cb608
--- /dev/null
+++ b/engine/apps/alerts/tests/test_alert_receiver_channel.py
@@ -0,0 +1,149 @@
+from unittest import mock
+from unittest.mock import patch
+
+import pytest
+from django.urls import reverse
+
+from apps.alerts.models import AlertReceiveChannel
+
+
+@pytest.mark.django_db
+@pytest.mark.parametrize(
+ "url",
+ [
+ "https://site.com/",
+ "https://site.com",
+ ],
+)
+def test_integration_url(make_organization, make_alert_receive_channel, url, settings):
+ settings.BASE_URL = url
+
+ organization = make_organization()
+ alert_receive_channel = make_alert_receive_channel(organization)
+
+ path = reverse(
+ f"integrations:{AlertReceiveChannel.INTEGRATIONS_TO_REVERSE_URL_MAP[alert_receive_channel.integration]}",
+ kwargs={"alert_channel_key": alert_receive_channel.token},
+ )
+
+ # remove trailing / if present
+ if url[-1] == "/":
+ url = url[:-1]
+
+ assert alert_receive_channel.integration_url == f"{url}{path}"
+
+
+@pytest.mark.django_db
+def test_get_template_attribute_no_backends(make_organization, make_alert_receive_channel):
+ organization = make_organization()
+ alert_receive_channel = make_alert_receive_channel(organization, messaging_backends_templates=None)
+
+ attr = alert_receive_channel.get_template_attribute("TESTONLY", "title")
+ assert attr is None
+
+
+@pytest.mark.django_db
+def test_get_template_attribute_backend_not_set(make_organization, make_alert_receive_channel):
+ organization = make_organization()
+ alert_receive_channel = make_alert_receive_channel(
+ organization, messaging_backends_templates={"OTHER": {"title": "the-title"}}
+ )
+
+ attr = alert_receive_channel.get_template_attribute("TESTONLY", "title")
+ assert attr is None
+
+
+@pytest.mark.django_db
+def test_get_template_attribute_backend_attr_not_set(make_organization, make_alert_receive_channel):
+ organization = make_organization()
+ alert_receive_channel = make_alert_receive_channel(organization, messaging_backends_templates={"TESTONLY": {}})
+
+ attr = alert_receive_channel.get_template_attribute("TESTONLY", "title")
+ assert attr is None
+
+
+@pytest.mark.django_db
+def test_get_template_attribute_ok(make_organization, make_alert_receive_channel):
+ organization = make_organization()
+ alert_receive_channel = make_alert_receive_channel(
+ organization, messaging_backends_templates={"TESTONLY": {"title": "the-title"}}
+ )
+
+ attr = alert_receive_channel.get_template_attribute("TESTONLY", "title")
+ assert attr == "the-title"
+
+
+@pytest.mark.django_db
+def test_get_default_template_attribute_non_existing_backend(make_organization, make_alert_receive_channel):
+ organization = make_organization()
+ alert_receive_channel = make_alert_receive_channel(organization)
+
+ attr = alert_receive_channel.get_default_template_attribute("INVALID", "title")
+ assert attr is None
+
+
+@pytest.mark.django_db
+def test_get_default_template_attribute_fallback_to_web(make_organization, make_alert_receive_channel):
+ organization = make_organization()
+ alert_receive_channel = make_alert_receive_channel(organization)
+
+ attr = alert_receive_channel.get_default_template_attribute("TESTONLY", "title")
+ assert attr == alert_receive_channel.INTEGRATION_TO_DEFAULT_WEB_TITLE_TEMPLATE[alert_receive_channel.integration]
+
+
+@mock.patch("apps.integrations.tasks.create_alert.apply_async", return_value=None)
+@pytest.mark.django_db
+def test_send_demo_alert(mocked_create_alert, make_organization, make_alert_receive_channel):
+ organization = make_organization()
+ alert_receive_channel = make_alert_receive_channel(
+ organization, integration=AlertReceiveChannel.INTEGRATION_WEBHOOK
+ )
+ alert_receive_channel.send_demo_alert()
+ assert mocked_create_alert.called
+ assert mocked_create_alert.call_args.args[1]["is_demo"]
+ assert mocked_create_alert.call_args.args[1]["force_route_id"] is None
+
+
+@mock.patch("apps.integrations.tasks.create_alertmanager_alerts.apply_async", return_value=None)
+@pytest.mark.django_db
+@pytest.mark.parametrize(
+ "integration",
+ [
+ AlertReceiveChannel.INTEGRATION_ALERTMANAGER,
+ AlertReceiveChannel.INTEGRATION_GRAFANA,
+ AlertReceiveChannel.INTEGRATION_GRAFANA_ALERTING,
+ ],
+)
+def test_send_demo_alert_alertmanager_payload_shape(
+ mocked_create_alert, make_organization, make_alert_receive_channel, integration
+):
+ organization = make_organization()
+ alert_receive_channel = make_alert_receive_channel(organization, integration=integration)
+ alert_receive_channel.send_demo_alert()
+ assert mocked_create_alert.called
+ assert mocked_create_alert.call_args.args[1]["is_demo"]
+ assert mocked_create_alert.call_args.args[1]["force_route_id"] is None
+
+
+@pytest.mark.django_db
+def test_notify_maintenance_no_general_channel(make_organization, make_alert_receive_channel):
+ organization = make_organization(general_log_channel_id=None)
+ alert_receive_channel = make_alert_receive_channel(organization)
+
+ with patch("apps.alerts.models.alert_receive_channel.post_message_to_channel") as mock_post_message:
+ alert_receive_channel.notify_about_maintenance_action("maintenance mode enabled")
+
+ assert mock_post_message.call_count == 0
+
+
+@pytest.mark.django_db
+def test_notify_maintenance_with_general_channel(make_organization, make_alert_receive_channel):
+ organization = make_organization(general_log_channel_id="CHANNEL-ID")
+ alert_receive_channel = make_alert_receive_channel(organization)
+
+ with patch("apps.alerts.models.alert_receive_channel.post_message_to_channel") as mock_post_message:
+ alert_receive_channel.notify_about_maintenance_action("maintenance mode enabled")
+
+ mock_post_message.assert_called_once_with(
+ organization, organization.general_log_channel_id, "maintenance mode enabled"
+ )
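+
+
+# Illustrative sketch only: the two tests above imply that maintenance notifications are
+# posted to Slack only when the organization has a general log channel configured. This
+# hypothetical helper restates that guard; it is not the code under test, and the
+# post_message_to_channel signature is assumed from the mock assertions above.
+def _sketch_notify_about_maintenance(organization, message, post_message_to_channel):
+    if organization.general_log_channel_id is None:
+        # no general channel configured: silently skip the notification
+        return
+    post_message_to_channel(organization, organization.general_log_channel_id, message)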
diff --git a/engine/apps/alerts/tests/test_channel_filter.py b/engine/apps/alerts/tests/test_channel_filter.py
new file mode 100644
index 0000000000..69b9c80e0d
--- /dev/null
+++ b/engine/apps/alerts/tests/test_channel_filter.py
@@ -0,0 +1,101 @@
+from unittest import mock
+
+import pytest
+
+from apps.alerts.models import AlertReceiveChannel, ChannelFilter
+
+
+@pytest.mark.django_db
+def test_channel_filter_select_filter(make_organization, make_alert_receive_channel, make_channel_filter):
+ organization = make_organization()
+ alert_receive_channel = make_alert_receive_channel(organization)
+ default_channel_filter = make_channel_filter(alert_receive_channel, is_default=True)
+ filtering_term = "test alert"
+ channel_filter = make_channel_filter(alert_receive_channel, filtering_term=filtering_term, is_default=False)
+
+ title = "Test Title"
+
+    # the alert payload contains the custom route's filtering term, so the custom channel filter is selected
+ raw_request_data = {"title": filtering_term}
+ satisfied_filter = ChannelFilter.select_filter(alert_receive_channel, raw_request_data, title)
+ assert satisfied_filter == channel_filter
+
+    # the alert payload does not contain the custom route's filtering term, so the default channel filter is selected
+ raw_request_data = {"title": title}
+ satisfied_filter = ChannelFilter.select_filter(alert_receive_channel, raw_request_data, title)
+ assert satisfied_filter == default_channel_filter
+
+    # a demo alert for a custom route: force_route_id pins selection to that route even when the payload does not match
+ raw_request_data = {"title": "i'm not matching this route"}
+ satisfied_filter = ChannelFilter.select_filter(
+ alert_receive_channel, raw_request_data, title, force_route_id=channel_filter.pk
+ )
+ assert satisfied_filter == channel_filter
+
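+
+# A hedged sketch of the selection logic the test above exercises. This is an
+# illustrative reimplementation, not ChannelFilter.select_filter itself: it assumes
+# filters are ordered with the default filter last, that matching is a plain substring
+# test against the serialized payload and title, and that force_route_id always wins.
+def _sketch_select_filter(filters, raw_request_data, title, force_route_id=None):
+    if force_route_id is not None:
+        # demo alerts pin the route explicitly, bypassing term matching
+        return next(f for f in filters if f.pk == force_route_id)
+    haystack = str(raw_request_data) + str(title)
+    for f in filters:
+        if f.is_default or (f.filtering_term and f.filtering_term in haystack):
+            return f
+    return None
+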
+
+@pytest.mark.django_db
+def test_channel_filter_notification_backends_repr(make_organization, make_alert_receive_channel, make_channel_filter):
+ organization = make_organization()
+ alert_receive_channel = make_alert_receive_channel(organization)
+
+ # extra backend is enabled
+ channel_filter = make_channel_filter(
+ alert_receive_channel,
+ notification_backends={"BACKEND": {"channel_id": "foobar", "enabled": True}},
+ )
+
+ assert "BACKEND notification allowed: Yes" in channel_filter.repr_settings_for_client_side_logging
+ assert "BACKEND channel: foobar" in channel_filter.repr_settings_for_client_side_logging
+
+ # backend is disabled
+ channel_filter_disabled_backend = make_channel_filter(
+ alert_receive_channel,
+ notification_backends={"BACKEND": {"channel_id": "foobar", "enabled": False}},
+ )
+ assert "BACKEND notification allowed: No" in channel_filter_disabled_backend.repr_settings_for_client_side_logging
+ assert "BACKEND channel: foobar" in channel_filter_disabled_backend.repr_settings_for_client_side_logging
+
+
+@mock.patch("apps.integrations.tasks.create_alert.apply_async", return_value=None)
+@pytest.mark.django_db
+def test_send_demo_alert(
+ mocked_create_alert,
+ make_organization,
+ make_alert_receive_channel,
+ make_channel_filter,
+):
+ organization = make_organization()
+ alert_receive_channel = make_alert_receive_channel(
+ organization, integration=AlertReceiveChannel.INTEGRATION_WEBHOOK
+ )
+ filtering_term = "test alert"
+ channel_filter = make_channel_filter(alert_receive_channel, filtering_term=filtering_term, is_default=False)
+
+ channel_filter.send_demo_alert()
+ assert mocked_create_alert.called
+ assert mocked_create_alert.call_args.args[1]["is_demo"]
+ assert mocked_create_alert.call_args.args[1]["force_route_id"] == channel_filter.id
+
+
+@mock.patch("apps.integrations.tasks.create_alertmanager_alerts.apply_async", return_value=None)
+@pytest.mark.django_db
+@pytest.mark.parametrize(
+ "integration",
+ [
+ AlertReceiveChannel.INTEGRATION_ALERTMANAGER,
+ AlertReceiveChannel.INTEGRATION_GRAFANA,
+ AlertReceiveChannel.INTEGRATION_GRAFANA_ALERTING,
+ ],
+)
+def test_send_demo_alert_alertmanager_payload_shape(
+ mocked_create_alert, make_organization, make_alert_receive_channel, make_channel_filter, integration
+):
+ organization = make_organization()
+    alert_receive_channel = make_alert_receive_channel(organization, integration=integration)
+ filtering_term = "test alert"
+ channel_filter = make_channel_filter(alert_receive_channel, filtering_term=filtering_term, is_default=False)
+
+ channel_filter.send_demo_alert()
+ assert mocked_create_alert.called
+ assert mocked_create_alert.call_args.args[1]["is_demo"]
+ assert mocked_create_alert.call_args.args[1]["force_route_id"] == channel_filter.pk
diff --git a/engine/apps/alerts/tests/test_check_escalation_finished_task.py b/engine/apps/alerts/tests/test_check_escalation_finished_task.py
new file mode 100644
index 0000000000..2e2ce43645
--- /dev/null
+++ b/engine/apps/alerts/tests/test_check_escalation_finished_task.py
@@ -0,0 +1,45 @@
+import pytest
+from django.utils import timezone
+
+from apps.alerts.models import AlertReceiveChannel
+from apps.alerts.tasks import check_escalation_finished_task
+
+
+@pytest.mark.django_db
+def test_check_escalation_finished_task(
+ make_organization_and_user,
+ make_alert_receive_channel,
+ make_alert_group,
+):
+ organization, user = make_organization_and_user()
+ alert_receive_channel = make_alert_receive_channel(
+ organization, integration=AlertReceiveChannel.INTEGRATION_GRAFANA
+ )
+ alert_group = make_alert_group(alert_receive_channel)
+
+ now = timezone.now()
+
+    # no escalation finish time is set: it cannot be estimated when the escalation chain snapshot
+    # has uncalculated steps or does not exist; no exception is raised
+ check_escalation_finished_task()
+
+    # the finish time is within the acceptable window (there is a 5-minute tolerance from now), so no exception is raised
+ alert_group.estimate_escalation_finish_time = now
+ alert_group.save()
+ check_escalation_finished_task()
+
+    # the estimated finish time is still in the future, so no exception is raised
+ alert_group.estimate_escalation_finish_time = now + timezone.timedelta(minutes=10)
+ alert_group.save()
+ check_escalation_finished_task()
+
+    # escalation is not finished and more than 5 minutes have passed since the estimated finish time, so an exception is raised
+ alert_group.estimate_escalation_finish_time = now - timezone.timedelta(minutes=10)
+ alert_group.save()
+ with pytest.raises(Exception):
+ check_escalation_finished_task()
+
+    # escalation is finished, so its finish time no longer matters and no exception is raised
+ alert_group.is_escalation_finished = True
+ alert_group.save()
+ check_escalation_finished_task()
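+
+
+# Hypothetical sketch of the audit performed by check_escalation_finished_task, as
+# implied by the assertions above: an alert group is flagged only if its escalation is
+# unfinished and its estimated finish time lies more than ~5 minutes in the past. The
+# tolerance value and helper name are assumptions, not the task's real internals.
+def _sketch_escalation_is_overdue(alert_group, now, tolerance=timezone.timedelta(minutes=5)):
+    if alert_group.is_escalation_finished:
+        return False
+    eta = alert_group.estimate_escalation_finish_time
+    if eta is None:
+        # the finish time could not be estimated (e.g. uncalculated snapshot steps)
+        return False
+    return now - eta > tolerance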
diff --git a/engine/apps/alerts/tests/test_custom_button.py b/engine/apps/alerts/tests/test_custom_button.py
new file mode 100644
index 0000000000..1b2f9e1a64
--- /dev/null
+++ b/engine/apps/alerts/tests/test_custom_button.py
@@ -0,0 +1,63 @@
+import pytest
+
+
+@pytest.mark.django_db
+def test_escaping_payload_with_double_quotes(
+ make_organization,
+ make_custom_action,
+ make_alert_receive_channel,
+ make_alert_group,
+ make_alert,
+):
+ organization = make_organization()
+ alert_receive_channel = make_alert_receive_channel(organization)
+ alert_group = make_alert_group(alert_receive_channel)
+ alert_payload = {
+ "text": '"Hello world"',
+ }
+
+ alert = make_alert(alert_group=alert_group, raw_request_data=alert_payload)
+
+ custom_button = make_custom_action(
+ name="github_button",
+ webhook="https://github.com/",
+ user="Chris Vanstras",
+ password="qwerty",
+ data='{\n "text" : "{{ alert_payload.text }}"\n}',
+ authorization_header="auth_token",
+ organization=organization,
+ )
+
+ custom_button.build_post_kwargs(alert)
+
+
+@pytest.mark.django_db
+def test_escaping_payload_with_single_quote_in_string(
+ make_organization,
+ make_custom_action,
+ make_alert_receive_channel,
+ make_alert_group,
+ make_alert,
+):
+ organization = make_organization()
+ alert_receive_channel = make_alert_receive_channel(organization)
+ alert_group = make_alert_group(alert_receive_channel)
+ alert_payload = {
+ "text": "Hi, it's alert",
+ }
+
+ alert = make_alert(alert_group=alert_group, raw_request_data=alert_payload)
+
+ custom_button = make_custom_action(
+ name="github_button",
+ webhook="https://github.com/",
+ user="Chris Vanstras",
+ password="qwerty",
+ data='{"data" : "{{ alert_payload }}"}',
+ authorization_header="auth_token",
+ organization=organization,
+ )
+
+ custom_button.build_post_kwargs(alert)
diff --git a/engine/apps/alerts/tests/test_default_templates.py b/engine/apps/alerts/tests/test_default_templates.py
new file mode 100644
index 0000000000..69288fb68c
--- /dev/null
+++ b/engine/apps/alerts/tests/test_default_templates.py
@@ -0,0 +1,108 @@
+import pytest
+from jinja2 import TemplateSyntaxError
+
+from apps.alerts.incident_appearance.templaters import (
+ AlertEmailTemplater,
+ AlertPhoneCallTemplater,
+ AlertSlackTemplater,
+ AlertSmsTemplater,
+ AlertTelegramTemplater,
+ AlertWebTemplater,
+)
+from apps.alerts.models import Alert, AlertReceiveChannel
+from apps.integrations.metadata.configuration import grafana
+from common.jinja_templater import jinja_template_env
+from common.utils import getattrd
+
+
+@pytest.mark.django_db
+@pytest.mark.parametrize(
+ "integration, template_module",
+ # Test only the integrations that have "tests" field in configuration
+    # test only the integrations that define a "tests" field in their configuration
+ (
+ integration.slug,
+ integration,
+ )
+ for integration in AlertReceiveChannel._config
+ if hasattr(integration, "tests")
+ ],
+)
+def test_default_templates(
+ make_organization_and_user_with_slack_identities,
+ make_alert_receive_channel,
+ make_alert_group,
+ make_alert,
+ integration,
+ template_module,
+):
+ organization, _, _, _ = make_organization_and_user_with_slack_identities()
+ alert_receive_channel = make_alert_receive_channel(organization, integration=integration)
+ alert_group = make_alert_group(alert_receive_channel)
+
+ alert = make_alert(alert_group=alert_group, raw_request_data=template_module.tests.get("payload"))
+
+ slack_templater = AlertSlackTemplater(alert)
+ web_templater = AlertWebTemplater(alert)
+ sms_templater = AlertSmsTemplater(alert)
+ email_templater = AlertEmailTemplater(alert)
+ telegram_templater = AlertTelegramTemplater(alert)
+ phone_call_templater = AlertPhoneCallTemplater(alert)
+ templaters = {
+ "slack": slack_templater,
+ "web": web_templater,
+ "sms": sms_templater,
+ "email": email_templater,
+ "telegram": telegram_templater,
+ "phone_call": phone_call_templater,
+ }
+ for notification_channel, templater in templaters.items():
+ rendered_alert = templater.render()
+ for attr in ["title", "message", "image_url"]:
+ expected = template_module.tests.get(notification_channel).get(attr)
+ if expected is not None:
+ expected = expected.format(
+ web_link=alert.group.web_link, integration_name=alert_receive_channel.verbal_name
+ )
+
+ rendered_attr = getattr(rendered_alert, attr)
+ assert rendered_attr == expected, (
+                f"{alert_receive_channel}'s {notification_channel} {attr} does not match the expected value"
+ )
+
+
+@pytest.mark.django_db
+@pytest.mark.parametrize(
+ "integration, template_module",
+ [
+ (AlertReceiveChannel.INTEGRATION_GRAFANA, grafana),
+ ],
+)
+def test_render_group_data_templates(
+ make_organization_and_user_with_slack_identities,
+ make_alert_receive_channel,
+ integration,
+ template_module,
+):
+ organization, _, _, _ = make_organization_and_user_with_slack_identities()
+ alert_receive_channel = make_alert_receive_channel(organization, integration=integration)
+
+ group_data = Alert.render_group_data(alert_receive_channel, template_module.tests.get("payload"))
+
+ assert group_data.group_distinction == template_module.tests.get("group_distinction")
+ assert group_data.is_resolve_signal == template_module.tests.get("is_resolve_signal")
+ assert group_data.is_acknowledge_signal == template_module.tests.get("is_acknowledge_signal")
+ assert group_data.group_verbose_name == template_module.tests.get("group_verbose_name")
+
+
+def test_default_templates_are_valid():
+ template_names = AlertReceiveChannel.template_names
+
+ for integration in AlertReceiveChannel._config:
+ for template_name in template_names:
+            template = getattrd(integration, template_name, None)
+ if template is not None:
+ try:
+ jinja_template_env.from_string(template)
+ except TemplateSyntaxError as e:
+ pytest.fail(e.message)
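+
+
+# A small usage sketch, separate from the assertions above: once a default template
+# passes the syntax check, it can be rendered against a payload with the same
+# environment. Passing the payload under the name "payload" is an assumption here.
+def _sketch_render_default_template(template_source, payload):
+    template = jinja_template_env.from_string(template_source)
+    return template.render(payload=payload)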
diff --git a/engine/apps/alerts/tests/test_escalation_chain.py b/engine/apps/alerts/tests/test_escalation_chain.py
new file mode 100644
index 0000000000..c70371cdda
--- /dev/null
+++ b/engine/apps/alerts/tests/test_escalation_chain.py
@@ -0,0 +1,55 @@
+import datetime
+
+import pytest
+
+from apps.alerts.models import EscalationPolicy
+from apps.schedules.models import OnCallScheduleCalendar
+
+
+@pytest.mark.django_db
+def test_copy_escalation_chain(
+ make_organization_and_user, make_escalation_chain, make_escalation_policy, make_schedule
+):
+ organization, user = make_organization_and_user()
+ escalation_chain = make_escalation_chain(organization)
+
+ notify_to_multiple_users_step = make_escalation_policy(
+ escalation_chain=escalation_chain,
+ escalation_policy_step=EscalationPolicy.STEP_NOTIFY_MULTIPLE_USERS,
+ )
+ notify_to_multiple_users_step.notify_to_users_queue.set([user])
+ make_escalation_policy(
+ escalation_chain=escalation_chain,
+ escalation_policy_step=EscalationPolicy.STEP_WAIT,
+ wait_delay=EscalationPolicy.FIFTEEN_MINUTES,
+ )
+    # arbitrary times for the test
+ from_time = datetime.time(10, 30)
+ to_time = datetime.time(18, 45)
+ make_escalation_policy(
+ escalation_chain=escalation_chain,
+ escalation_policy_step=EscalationPolicy.STEP_NOTIFY_IF_TIME,
+ from_time=from_time,
+ to_time=to_time,
+ )
+
+ schedule = make_schedule(organization, schedule_class=OnCallScheduleCalendar)
+ make_escalation_policy(
+ escalation_chain=escalation_chain,
+ escalation_policy_step=EscalationPolicy.STEP_NOTIFY_SCHEDULE,
+ notify_schedule=schedule,
+ )
+ all_fields = EscalationPolicy._meta.fields # Note that m-t-m fields are in this list
+ fields_to_not_compare = ["id", "public_primary_key", "escalation_chain", "last_notified_user"]
+ fields_to_compare = list(map(lambda f: f.name, filter(lambda f: f.name not in fields_to_not_compare, all_fields)))
+ copied_chain = escalation_chain.make_copy(f"copy_{escalation_chain.name}")
+ for policy_from_original, policy_from_copy in zip(
+ escalation_chain.escalation_policies.all(), copied_chain.escalation_policies.all()
+ ):
+ for field in fields_to_compare:
+ assert getattr(policy_from_original, field) == getattr(policy_from_copy, field)
+
+ # compare m-t-m fields separately
+ assert list(policy_from_original.notify_to_users_queue.all()) == list(
+ policy_from_copy.notify_to_users_queue.all()
+ )
diff --git a/engine/apps/alerts/tests/test_escalation_policy_snapshot.py b/engine/apps/alerts/tests/test_escalation_policy_snapshot.py
new file mode 100644
index 0000000000..a3d27f4544
--- /dev/null
+++ b/engine/apps/alerts/tests/test_escalation_policy_snapshot.py
@@ -0,0 +1,516 @@
+from unittest.mock import patch
+
+import pytest
+from django.utils import timezone
+
+from apps.alerts.constants import NEXT_ESCALATION_DELAY
+from apps.alerts.escalation_snapshot.serializers.escalation_policy_snapshot import EscalationPolicySnapshotSerializer
+from apps.alerts.escalation_snapshot.snapshot_classes import EscalationPolicySnapshot
+from apps.alerts.escalation_snapshot.utils import eta_for_escalation_step_notify_if_time
+from apps.alerts.models import AlertGroupLogRecord, EscalationPolicy
+from apps.schedules.ical_utils import list_users_to_notify_from_ical
+from apps.schedules.models import CustomOnCallShift, OnCallScheduleCalendar
+
+
+def get_escalation_policy_snapshot_from_model(escalation_policy):
+ raw_escalation_policy_data = EscalationPolicySnapshotSerializer(escalation_policy).data
+ escalation_policy_data = EscalationPolicySnapshotSerializer().to_internal_value(raw_escalation_policy_data)
+ escalation_policy_snapshot = EscalationPolicySnapshot(**escalation_policy_data)
+ return escalation_policy_snapshot
+
+
+@pytest.fixture()
+def escalation_step_test_setup(
+ make_organization_and_user,
+ make_alert_receive_channel,
+ make_channel_filter,
+ make_escalation_chain,
+ make_alert_group,
+):
+ organization, user = make_organization_and_user()
+ alert_receive_channel = make_alert_receive_channel(organization)
+
+ escalation_chain = make_escalation_chain(organization=organization)
+ channel_filter = make_channel_filter(alert_receive_channel, escalation_chain=escalation_chain)
+
+ alert_group = make_alert_group(alert_receive_channel, channel_filter=channel_filter)
+ reason = "test escalation step"
+ return organization, user, alert_receive_channel, channel_filter, alert_group, reason
+
+
+@patch("apps.alerts.escalation_snapshot.snapshot_classes.EscalationPolicySnapshot._execute_tasks", return_value=None)
+@pytest.mark.django_db
+def test_escalation_step_wait(
+ mocked_execute_tasks,
+ escalation_step_test_setup,
+ make_escalation_policy,
+):
+ _, _, _, channel_filter, alert_group, reason = escalation_step_test_setup
+ wait_delay = EscalationPolicy.FIFTEEN_MINUTES
+ wait_step = make_escalation_policy(
+ escalation_chain=channel_filter.escalation_chain,
+ escalation_policy_step=EscalationPolicy.STEP_WAIT,
+ wait_delay=wait_delay,
+ )
+ escalation_policy_snapshot = get_escalation_policy_snapshot_from_model(wait_step)
+ now = timezone.now()
+ result = escalation_policy_snapshot.execute(alert_group, reason)
+
+ assert result.eta is not None
+ assert wait_delay + timezone.timedelta(minutes=1) > result.eta - now >= wait_delay
+ assert result.stop_escalation is False and result.pause_escalation is False and result.start_from_beginning is False
+ assert wait_step.log_records.filter(type=AlertGroupLogRecord.TYPE_ESCALATION_TRIGGERED).exists()
+ assert not mocked_execute_tasks.called
+
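+
+# The 15-second window assertions repeated throughout this module allow for the
+# wall-clock time that elapses between computing expected_eta and the step actually
+# executing. A hypothetical helper expressing the same check:
+def _sketch_eta_close_to(result_eta, expected_eta, tolerance=timezone.timedelta(seconds=15)):
+    return expected_eta - tolerance < result_eta < expected_eta + tolerance
+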
+
+@patch("apps.alerts.escalation_snapshot.snapshot_classes.EscalationPolicySnapshot._execute_tasks", return_value=None)
+@pytest.mark.django_db
+def test_escalation_step_notify_all(
+ mocked_execute_tasks,
+ escalation_step_test_setup,
+ make_escalation_policy,
+):
+ _, _, _, channel_filter, alert_group, reason = escalation_step_test_setup
+
+ notify_all_step = make_escalation_policy(
+ escalation_chain=channel_filter.escalation_chain,
+ escalation_policy_step=EscalationPolicy.STEP_FINAL_NOTIFYALL,
+ )
+ escalation_policy_snapshot = get_escalation_policy_snapshot_from_model(notify_all_step)
+ expected_eta = timezone.now() + timezone.timedelta(seconds=NEXT_ESCALATION_DELAY)
+
+ result = escalation_policy_snapshot.execute(alert_group, reason)
+ expected_result = EscalationPolicySnapshot.StepExecutionResultData(
+ eta=result.eta,
+ stop_escalation=False,
+ pause_escalation=False,
+ start_from_beginning=False,
+ )
+ assert expected_eta + timezone.timedelta(seconds=15) > result.eta > expected_eta - timezone.timedelta(seconds=15)
+ assert result == expected_result
+ assert mocked_execute_tasks.called
+
+
+@patch("apps.alerts.escalation_snapshot.snapshot_classes.EscalationPolicySnapshot._execute_tasks", return_value=None)
+@pytest.mark.django_db
+def test_escalation_step_notify_users_queue(
+ mocked_execute_tasks,
+ make_user_for_organization,
+ escalation_step_test_setup,
+ make_escalation_policy,
+):
+ organization, user, _, channel_filter, alert_group, reason = escalation_step_test_setup
+ user_2 = make_user_for_organization(organization)
+
+ notify_queue_step = make_escalation_policy(
+ escalation_chain=channel_filter.escalation_chain,
+ escalation_policy_step=EscalationPolicy.STEP_NOTIFY_USERS_QUEUE,
+ )
+ notify_queue_step.notify_to_users_queue.set([user, user_2])
+ escalation_policy_snapshot = get_escalation_policy_snapshot_from_model(notify_queue_step)
+
+ assert escalation_policy_snapshot.next_user_in_sorted_queue == escalation_policy_snapshot.sorted_users_queue[0]
+
+ expected_eta = timezone.now() + timezone.timedelta(seconds=NEXT_ESCALATION_DELAY)
+ result = escalation_policy_snapshot.execute(alert_group, reason)
+ expected_result = EscalationPolicySnapshot.StepExecutionResultData(
+ eta=result.eta,
+ stop_escalation=False,
+ pause_escalation=False,
+ start_from_beginning=False,
+ )
+ assert expected_eta + timezone.timedelta(seconds=15) > result.eta > expected_eta - timezone.timedelta(seconds=15)
+ assert result == expected_result
+ assert escalation_policy_snapshot.next_user_in_sorted_queue == escalation_policy_snapshot.sorted_users_queue[1]
+ assert notify_queue_step.log_records.filter(type=AlertGroupLogRecord.TYPE_ESCALATION_TRIGGERED).exists()
+ assert mocked_execute_tasks.called
+
+
+@patch("apps.alerts.escalation_snapshot.snapshot_classes.EscalationPolicySnapshot._execute_tasks", return_value=None)
+@pytest.mark.django_db
+def test_escalation_step_notify_multiple_users(
+ mocked_execute_tasks,
+ escalation_step_test_setup,
+ make_escalation_policy,
+):
+ organization, user, _, channel_filter, alert_group, reason = escalation_step_test_setup
+
+ notify_users_step = make_escalation_policy(
+ escalation_chain=channel_filter.escalation_chain,
+ escalation_policy_step=EscalationPolicy.STEP_NOTIFY_MULTIPLE_USERS,
+ )
+ notify_users_step.notify_to_users_queue.set([user])
+ escalation_policy_snapshot = get_escalation_policy_snapshot_from_model(notify_users_step)
+
+ expected_eta = timezone.now() + timezone.timedelta(seconds=NEXT_ESCALATION_DELAY)
+ result = escalation_policy_snapshot.execute(alert_group, reason)
+ expected_result = EscalationPolicySnapshot.StepExecutionResultData(
+ eta=result.eta,
+ stop_escalation=False,
+ pause_escalation=False,
+ start_from_beginning=False,
+ )
+ assert expected_eta + timezone.timedelta(seconds=15) > result.eta > expected_eta - timezone.timedelta(seconds=15)
+ assert result == expected_result
+ assert notify_users_step.log_records.filter(type=AlertGroupLogRecord.TYPE_ESCALATION_TRIGGERED).exists()
+ assert mocked_execute_tasks.called
+
+
+@patch("apps.alerts.escalation_snapshot.snapshot_classes.EscalationPolicySnapshot._execute_tasks", return_value=None)
+@pytest.mark.django_db
+def test_escalation_step_notify_on_call_schedule(
+ mocked_execute_tasks,
+ escalation_step_test_setup,
+ make_escalation_policy,
+ make_schedule,
+ make_on_call_shift,
+):
+ organization, user, _, channel_filter, alert_group, reason = escalation_step_test_setup
+
+ schedule = make_schedule(organization, schedule_class=OnCallScheduleCalendar)
+    # create an on-call shift with a user to notify
+ data = {
+ "start": timezone.datetime.now().replace(microsecond=0),
+ "duration": timezone.timedelta(seconds=7200),
+ }
+ on_call_shift = make_on_call_shift(
+ organization=organization, shift_type=CustomOnCallShift.TYPE_SINGLE_EVENT, **data
+ )
+ on_call_shift.users.add(user)
+ schedule.custom_on_call_shifts.add(on_call_shift)
+
+ notify_schedule_step = make_escalation_policy(
+ escalation_chain=channel_filter.escalation_chain,
+ escalation_policy_step=EscalationPolicy.STEP_NOTIFY_SCHEDULE,
+ notify_schedule=schedule,
+ )
+ escalation_policy_snapshot = get_escalation_policy_snapshot_from_model(notify_schedule_step)
+ expected_eta = timezone.now() + timezone.timedelta(seconds=NEXT_ESCALATION_DELAY)
+ result = escalation_policy_snapshot.execute(alert_group, reason)
+ expected_result = EscalationPolicySnapshot.StepExecutionResultData(
+ eta=result.eta,
+ stop_escalation=False,
+ pause_escalation=False,
+ start_from_beginning=False,
+ )
+ assert expected_eta + timezone.timedelta(seconds=15) > result.eta > expected_eta - timezone.timedelta(seconds=15)
+ assert result == expected_result
+ assert notify_schedule_step.log_records.filter(type=AlertGroupLogRecord.TYPE_ESCALATION_TRIGGERED).exists()
+ assert list(escalation_policy_snapshot.notify_to_users_queue) == list(list_users_to_notify_from_ical(schedule))
+ assert mocked_execute_tasks.called
+
+
+@patch("apps.alerts.escalation_snapshot.snapshot_classes.EscalationPolicySnapshot._execute_tasks", return_value=None)
+@pytest.mark.django_db
+def test_escalation_step_notify_user_group(
+ mocked_execute_tasks,
+ escalation_step_test_setup,
+ make_slack_team_identity,
+ make_slack_user_group,
+ make_escalation_policy,
+):
+ organization, user, _, channel_filter, alert_group, reason = escalation_step_test_setup
+ slack_team_identity = make_slack_team_identity()
+ organization.slack_team_identity = slack_team_identity
+ organization.save()
+ user_group = make_slack_user_group(slack_team_identity)
+
+ notify_user_group_step = make_escalation_policy(
+ escalation_chain=channel_filter.escalation_chain,
+ escalation_policy_step=EscalationPolicy.STEP_NOTIFY_GROUP,
+ notify_to_group=user_group,
+ )
+ escalation_policy_snapshot = get_escalation_policy_snapshot_from_model(notify_user_group_step)
+ expected_eta = timezone.now() + timezone.timedelta(seconds=NEXT_ESCALATION_DELAY)
+ result = escalation_policy_snapshot.execute(alert_group, reason)
+ expected_result = EscalationPolicySnapshot.StepExecutionResultData(
+ eta=result.eta,
+ stop_escalation=False,
+ pause_escalation=False,
+ start_from_beginning=False,
+ )
+ assert expected_eta + timezone.timedelta(seconds=15) > result.eta > expected_eta - timezone.timedelta(seconds=15)
+ assert result == expected_result
+ assert mocked_execute_tasks.called
+
+
+@patch("apps.alerts.escalation_snapshot.snapshot_classes.EscalationPolicySnapshot._execute_tasks", return_value=None)
+@pytest.mark.django_db
+def test_escalation_step_notify_if_time(
+ mocked_execute_tasks,
+ escalation_step_test_setup,
+ make_escalation_policy,
+):
+ organization, user, _, channel_filter, alert_group, reason = escalation_step_test_setup
+
+    # the current time is outside the [from_time, to_time] window, so the step returns an eta
+ now = timezone.now()
+ from_time = (now - timezone.timedelta(hours=2)).time()
+ to_time = (now - timezone.timedelta(hours=1)).time()
+ notify_if_time_step_1 = make_escalation_policy(
+ escalation_chain=channel_filter.escalation_chain,
+ escalation_policy_step=EscalationPolicy.STEP_NOTIFY_IF_TIME,
+ from_time=from_time,
+ to_time=to_time,
+ )
+
+ escalation_policy_snapshot = get_escalation_policy_snapshot_from_model(notify_if_time_step_1)
+ estimated_time_of_arrival = eta_for_escalation_step_notify_if_time(from_time, to_time)
+ expected_result = EscalationPolicySnapshot.StepExecutionResultData(
+ eta=estimated_time_of_arrival,
+ stop_escalation=False,
+ pause_escalation=False,
+ start_from_beginning=False,
+ )
+ assert estimated_time_of_arrival is not None
+
+ result = escalation_policy_snapshot.execute(alert_group, reason)
+
+ assert result == expected_result
+ assert notify_if_time_step_1.log_records.filter(type=AlertGroupLogRecord.TYPE_ESCALATION_TRIGGERED).exists()
+ assert not mocked_execute_tasks.called
+
+    # the current time is inside the [from_time, to_time] window, so no eta is needed (it is None)
+ from_time = (now - timezone.timedelta(hours=2)).time()
+ to_time = (now + timezone.timedelta(hours=1)).time()
+ notify_if_time_step_2 = make_escalation_policy(
+ escalation_chain=channel_filter.escalation_chain,
+ escalation_policy_step=EscalationPolicy.STEP_NOTIFY_IF_TIME,
+ from_time=from_time,
+ to_time=to_time,
+ )
+
+ escalation_policy_snapshot = get_escalation_policy_snapshot_from_model(notify_if_time_step_2)
+ estimated_time_of_arrival = eta_for_escalation_step_notify_if_time(from_time, to_time)
+ assert estimated_time_of_arrival is None
+
+ expected_eta = timezone.now() + timezone.timedelta(seconds=NEXT_ESCALATION_DELAY)
+ result = escalation_policy_snapshot.execute(alert_group, reason)
+ expected_result = EscalationPolicySnapshot.StepExecutionResultData(
+ eta=result.eta,
+ stop_escalation=False,
+ pause_escalation=False,
+ start_from_beginning=False,
+ )
+ assert expected_eta + timezone.timedelta(seconds=15) > result.eta > expected_eta - timezone.timedelta(seconds=15)
+
+ assert result == expected_result
+ assert notify_if_time_step_2.log_records.filter(type=AlertGroupLogRecord.TYPE_ESCALATION_TRIGGERED).exists()
+ assert not mocked_execute_tasks.called
+
+
+@patch("apps.alerts.escalation_snapshot.snapshot_classes.EscalationPolicySnapshot._execute_tasks", return_value=None)
+@pytest.mark.django_db
+def test_escalation_step_notify_if_num_alerts_in_window(
+ mocked_execute_tasks, escalation_step_test_setup, make_escalation_policy, make_alert
+):
+ organization, user, _, channel_filter, alert_group, reason = escalation_step_test_setup
+
+ make_alert(alert_group=alert_group, raw_request_data={})
+ make_alert(alert_group=alert_group, raw_request_data={})
+
+ notify_if_3_alerts_per_1_minute = make_escalation_policy(
+ escalation_chain=channel_filter.escalation_chain,
+ escalation_policy_step=EscalationPolicy.STEP_NOTIFY_IF_NUM_ALERTS_IN_TIME_WINDOW,
+ num_alerts_in_window=3,
+ num_minutes_in_window=1,
+ )
+
+ escalation_policy_snapshot = get_escalation_policy_snapshot_from_model(notify_if_3_alerts_per_1_minute)
+ expected_eta = timezone.now() + timezone.timedelta(seconds=NEXT_ESCALATION_DELAY)
+ result = escalation_policy_snapshot.execute(alert_group, reason)
+ expected_result = EscalationPolicySnapshot.StepExecutionResultData(
+ eta=result.eta,
+ stop_escalation=False,
+ pause_escalation=True,
+ start_from_beginning=False,
+ )
+ assert expected_eta + timezone.timedelta(seconds=15) > result.eta > expected_eta - timezone.timedelta(seconds=15)
+ assert result == expected_result
+ assert notify_if_3_alerts_per_1_minute.log_records.filter(
+ type=AlertGroupLogRecord.TYPE_ESCALATION_TRIGGERED
+ ).exists()
+ assert not mocked_execute_tasks.called
+
+ make_alert(alert_group=alert_group, raw_request_data={})
+
+ notify_if_1_alert = make_escalation_policy(
+ escalation_chain=channel_filter.escalation_chain,
+ escalation_policy_step=EscalationPolicy.STEP_NOTIFY_IF_NUM_ALERTS_IN_TIME_WINDOW,
+ num_alerts_in_window=1,
+ num_minutes_in_window=2,
+ )
+
+ escalation_policy_snapshot = get_escalation_policy_snapshot_from_model(notify_if_1_alert)
+ expected_eta = timezone.now() + timezone.timedelta(seconds=NEXT_ESCALATION_DELAY)
+ result = escalation_policy_snapshot.execute(alert_group, reason)
+ expected_result = EscalationPolicySnapshot.StepExecutionResultData(
+ eta=result.eta,
+ stop_escalation=False,
+ pause_escalation=False,
+ start_from_beginning=False,
+ )
+ assert expected_eta + timezone.timedelta(seconds=15) > result.eta > expected_eta - timezone.timedelta(seconds=15)
+ assert result == expected_result
+ assert not mocked_execute_tasks.called
+
+
+@patch("apps.alerts.escalation_snapshot.snapshot_classes.EscalationPolicySnapshot._execute_tasks", return_value=None)
+@pytest.mark.django_db
+def test_escalation_step_trigger_custom_button(
+ mocked_execute_tasks,
+ escalation_step_test_setup,
+ make_custom_action,
+ make_escalation_policy,
+):
+ organization, _, alert_receive_channel, channel_filter, alert_group, reason = escalation_step_test_setup
+
+ custom_button = make_custom_action(organization=organization)
+
+ trigger_custom_button_step = make_escalation_policy(
+ escalation_chain=channel_filter.escalation_chain,
+ escalation_policy_step=EscalationPolicy.STEP_TRIGGER_CUSTOM_BUTTON,
+ custom_button_trigger=custom_button,
+ )
+ escalation_policy_snapshot = get_escalation_policy_snapshot_from_model(trigger_custom_button_step)
+ expected_eta = timezone.now() + timezone.timedelta(seconds=NEXT_ESCALATION_DELAY)
+ result = escalation_policy_snapshot.execute(alert_group, reason)
+ expected_result = EscalationPolicySnapshot.StepExecutionResultData(
+ eta=result.eta,
+ stop_escalation=False,
+ pause_escalation=False,
+ start_from_beginning=False,
+ )
+ assert expected_eta + timezone.timedelta(seconds=15) > result.eta > expected_eta - timezone.timedelta(seconds=15)
+ assert result == expected_result
+ assert mocked_execute_tasks.called
+
+
+@patch("apps.alerts.escalation_snapshot.snapshot_classes.EscalationPolicySnapshot._execute_tasks", return_value=None)
+@pytest.mark.django_db
+def test_escalation_step_repeat_escalation_n_times(
+ mocked_execute_tasks,
+ escalation_step_test_setup,
+ make_escalation_policy,
+):
+ _, _, _, channel_filter, alert_group, reason = escalation_step_test_setup
+
+ repeat_escalation_step = make_escalation_policy(
+ escalation_chain=channel_filter.escalation_chain,
+ escalation_policy_step=EscalationPolicy.STEP_REPEAT_ESCALATION_N_TIMES,
+ )
+ escalation_policy_snapshot = get_escalation_policy_snapshot_from_model(repeat_escalation_step)
+
+ assert escalation_policy_snapshot.escalation_counter == 0
+
+ expected_eta = timezone.now() + timezone.timedelta(seconds=NEXT_ESCALATION_DELAY)
+ result = escalation_policy_snapshot.execute(alert_group, reason)
+ expected_result = EscalationPolicySnapshot.StepExecutionResultData(
+ eta=result.eta,
+ stop_escalation=False,
+ pause_escalation=False,
+ start_from_beginning=True,
+ )
+ assert expected_eta + timezone.timedelta(seconds=15) > result.eta > expected_eta - timezone.timedelta(seconds=15)
+ assert escalation_policy_snapshot.escalation_counter == 1
+ assert result == expected_result
+ assert repeat_escalation_step.log_records.filter(type=AlertGroupLogRecord.TYPE_ESCALATION_TRIGGERED).exists()
+ assert not mocked_execute_tasks.called
+
+
+@patch("apps.alerts.escalation_snapshot.snapshot_classes.EscalationPolicySnapshot._execute_tasks", return_value=None)
+@pytest.mark.django_db
+def test_escalation_step_resolve(
+ mocked_execute_tasks,
+ escalation_step_test_setup,
+ make_escalation_policy,
+):
+ _, _, _, channel_filter, alert_group, reason = escalation_step_test_setup
+
+ resolve_step = make_escalation_policy(
+ escalation_chain=channel_filter.escalation_chain,
+ escalation_policy_step=EscalationPolicy.STEP_FINAL_RESOLVE,
+ )
+ escalation_policy_snapshot = get_escalation_policy_snapshot_from_model(resolve_step)
+ expected_eta = timezone.now() + timezone.timedelta(seconds=NEXT_ESCALATION_DELAY)
+ result = escalation_policy_snapshot.execute(alert_group, reason)
+ expected_result = EscalationPolicySnapshot.StepExecutionResultData(
+ eta=result.eta,
+ stop_escalation=True,
+ pause_escalation=False,
+ start_from_beginning=False,
+ )
+ assert expected_eta + timezone.timedelta(seconds=15) > result.eta > expected_eta - timezone.timedelta(seconds=15)
+ assert result == expected_result
+ assert resolve_step.log_records.filter(type=AlertGroupLogRecord.TYPE_ESCALATION_TRIGGERED).exists()
+ assert mocked_execute_tasks.called
+
+
+@patch("apps.alerts.escalation_snapshot.snapshot_classes.EscalationPolicySnapshot._execute_tasks", return_value=None)
+@pytest.mark.django_db
+def test_escalation_step_is_not_configured(
+ mocked_execute_tasks,
+ escalation_step_test_setup,
+ make_escalation_policy,
+):
+ _, _, _, channel_filter, alert_group, reason = escalation_step_test_setup
+
+ not_configured_step = make_escalation_policy(
+ escalation_chain=channel_filter.escalation_chain,
+ escalation_policy_step=None,
+ )
+ escalation_policy_snapshot = get_escalation_policy_snapshot_from_model(not_configured_step)
+ expected_eta = timezone.now() + timezone.timedelta(seconds=NEXT_ESCALATION_DELAY)
+ result = escalation_policy_snapshot.execute(alert_group, reason)
+ expected_result = EscalationPolicySnapshot.StepExecutionResultData(
+ eta=result.eta,
+ stop_escalation=False,
+ pause_escalation=False,
+ start_from_beginning=False,
+ )
+ assert expected_eta + timezone.timedelta(seconds=15) > result.eta > expected_eta - timezone.timedelta(seconds=15)
+ assert result == expected_result
+ assert not_configured_step.log_records.filter(type=AlertGroupLogRecord.TYPE_ESCALATION_FAILED).exists()
+ assert not mocked_execute_tasks.called
+
+
+@pytest.mark.django_db
+def test_escalation_step_with_deleted_user(
+ escalation_step_test_setup,
+ make_user_for_organization,
+ make_escalation_policy,
+):
+ """
+    Test that a deleted (deactivated) user in an escalation policy snapshot is silently ignored instead of raising a ValidationError.
+ """
+ organization, user, _, channel_filter, _, _ = escalation_step_test_setup
+ inactive_user = make_user_for_organization(organization=organization, is_active=False)
+
+ escalation_policy = make_escalation_policy(
+ escalation_chain=channel_filter.escalation_chain,
+ escalation_policy_step=EscalationPolicy.STEP_NOTIFY_MULTIPLE_USERS,
+ )
+ escalation_policy.notify_to_users_queue.set([user, inactive_user])
+ raw_snapshot = {
+ "id": escalation_policy.pk,
+ "order": 0,
+ "step": escalation_policy.step,
+ "wait_delay": None,
+ "notify_to_users_queue": [user.pk, inactive_user.pk],
+ "last_notified_user": None,
+ "from_time": None,
+ "to_time": None,
+ "num_alerts_in_window": None,
+ "num_minutes_in_window": None,
+ "custom_button_trigger": None,
+ "notify_schedule": None,
+ "notify_to_group": None,
+ "escalation_counter": 0,
+ "passed_last_time": None,
+ "pause_escalation": False,
+ }
+
+ deserialized_escalation_snapshot = EscalationPolicySnapshotSerializer().to_internal_value(raw_snapshot)
+ assert deserialized_escalation_snapshot["notify_to_users_queue"] == [user]
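+
+
+# Illustrative sketch of the behavior asserted above: when a snapshot is deserialized,
+# user primary keys that no longer resolve to an active user are dropped rather than
+# raising a ValidationError. The queryset-based shape is an assumption for illustration.
+def _sketch_resolve_snapshot_users(user_pks, users_queryset):
+    return list(users_queryset.filter(pk__in=user_pks, is_active=True))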
diff --git a/engine/apps/alerts/tests/test_escalation_snapshot.py b/engine/apps/alerts/tests/test_escalation_snapshot.py
new file mode 100644
index 0000000000..98aaad0331
--- /dev/null
+++ b/engine/apps/alerts/tests/test_escalation_snapshot.py
@@ -0,0 +1,228 @@
+import datetime
+
+import pytest
+from django.utils import timezone
+
+from apps.alerts.escalation_snapshot.snapshot_classes import (
+ ChannelFilterSnapshot,
+ EscalationPolicySnapshot,
+ EscalationSnapshot,
+)
+from apps.alerts.models import EscalationPolicy
+
+
+@pytest.fixture()
+def escalation_snapshot_test_setup(
+ make_organization_and_user,
+ make_user_for_organization,
+ make_alert_receive_channel,
+ make_channel_filter,
+ make_escalation_chain,
+ make_escalation_policy,
+ make_alert_group,
+):
+ organization, user_1 = make_organization_and_user()
+ user_2 = make_user_for_organization(organization)
+
+ alert_receive_channel = make_alert_receive_channel(organization)
+
+ escalation_chain = make_escalation_chain(organization)
+ channel_filter = make_channel_filter(
+ alert_receive_channel,
+ escalation_chain=escalation_chain,
+ notification_backends={"BACKEND": {"channel_id": "abc123"}},
+ )
+
+ notify_to_multiple_users_step = make_escalation_policy(
+ escalation_chain=channel_filter.escalation_chain,
+ escalation_policy_step=EscalationPolicy.STEP_NOTIFY_MULTIPLE_USERS,
+ )
+ notify_to_multiple_users_step.notify_to_users_queue.set([user_1, user_2])
+ wait_step = make_escalation_policy(
+ escalation_chain=channel_filter.escalation_chain,
+ escalation_policy_step=EscalationPolicy.STEP_WAIT,
+ wait_delay=EscalationPolicy.FIFTEEN_MINUTES,
+ )
+    # arbitrary times for the test
+ from_time = datetime.time(10, 30)
+ to_time = datetime.time(18, 45)
+ notify_if_time_step = make_escalation_policy(
+ escalation_chain=channel_filter.escalation_chain,
+ escalation_policy_step=EscalationPolicy.STEP_NOTIFY_IF_TIME,
+ from_time=from_time,
+ to_time=to_time,
+ )
+
+ alert_group = make_alert_group(alert_receive_channel, channel_filter=channel_filter)
+ alert_group.raw_escalation_snapshot = alert_group.build_raw_escalation_snapshot()
+ alert_group.save()
+ return alert_group, notify_to_multiple_users_step, wait_step, notify_if_time_step
+
+
+@pytest.mark.django_db
+def test_raw_escalation_snapshot(escalation_snapshot_test_setup):
+ alert_group, notify_to_multiple_users_step, wait_step, notify_if_time_step = escalation_snapshot_test_setup
+ raw_escalation_snapshot = alert_group.build_raw_escalation_snapshot()
+
+ expected_result = {
+ "channel_filter_snapshot": {
+ "id": alert_group.channel_filter.pk,
+ "str_for_clients": alert_group.channel_filter.str_for_clients,
+ "notify_in_slack": True,
+ "notify_in_telegram": False,
+ "notification_backends": alert_group.channel_filter.notification_backends,
+ },
+ "pause_escalation": False,
+ "last_active_escalation_policy_order": None,
+ "slack_channel_id": None,
+ "next_step_eta": None,
+ "escalation_chain_snapshot": {
+ "id": notify_to_multiple_users_step.escalation_chain.pk,
+ "name": notify_to_multiple_users_step.escalation_chain.name,
+ },
+ "escalation_policies_snapshots": [
+ {
+ "id": notify_to_multiple_users_step.pk,
+ "order": 0,
+ "step": EscalationPolicy.STEP_NOTIFY_MULTIPLE_USERS,
+ "wait_delay": None,
+ "notify_to_users_queue": [u.pk for u in notify_to_multiple_users_step.notify_to_users_queue.all()],
+ "last_notified_user": None,
+ "notify_schedule": None,
+ "notify_to_group": None,
+ "from_time": None,
+ "to_time": None,
+ "num_alerts_in_window": None,
+ "num_minutes_in_window": None,
+ "custom_button_trigger": None,
+ "escalation_counter": 0,
+ "passed_last_time": None,
+ "pause_escalation": False,
+ },
+ {
+ "id": wait_step.pk,
+ "order": 1,
+ "step": EscalationPolicy.STEP_WAIT,
+ "wait_delay": "00:15:00",
+ "notify_to_users_queue": [],
+ "last_notified_user": None,
+ "notify_schedule": None,
+ "notify_to_group": None,
+ "from_time": None,
+ "to_time": None,
+ "num_alerts_in_window": None,
+ "num_minutes_in_window": None,
+ "custom_button_trigger": None,
+ "escalation_counter": 0,
+ "passed_last_time": None,
+ "pause_escalation": False,
+ },
+ {
+ "id": notify_if_time_step.pk,
+ "order": 2,
+ "step": EscalationPolicy.STEP_NOTIFY_IF_TIME,
+ "wait_delay": None,
+ "notify_to_users_queue": [],
+ "last_notified_user": None,
+ "notify_schedule": None,
+ "notify_to_group": None,
+ "from_time": notify_if_time_step.from_time.isoformat(),
+ "to_time": notify_if_time_step.to_time.isoformat(),
+ "num_alerts_in_window": None,
+ "num_minutes_in_window": None,
+ "custom_button_trigger": None,
+ "escalation_counter": 0,
+ "passed_last_time": None,
+ "pause_escalation": False,
+ },
+ ],
+ }
+ assert raw_escalation_snapshot == expected_result
+
+
+@pytest.mark.django_db
+def test_serialized_escalation_snapshot(escalation_snapshot_test_setup):
+ alert_group, notify_to_multiple_users_step, wait_step, notify_if_time_step = escalation_snapshot_test_setup
+ escalation_snapshot = alert_group.escalation_snapshot
+ assert isinstance(escalation_snapshot, EscalationSnapshot)
+ assert escalation_snapshot.channel_filter_snapshot is not None and isinstance(
+ escalation_snapshot.channel_filter_snapshot, ChannelFilterSnapshot
+ )
+ assert escalation_snapshot.escalation_policies_snapshots is not None and isinstance(
+ escalation_snapshot.escalation_policies_snapshots[0], EscalationPolicySnapshot
+ )
+ assert (
+ len(escalation_snapshot.escalation_policies_snapshots)
+ == alert_group.channel_filter.escalation_chain.escalation_policies.count()
+ )
+
+ escalation_snapshot_dict = escalation_snapshot.convert_to_dict()
+
+ assert alert_group.raw_escalation_snapshot == escalation_snapshot_dict
+
+
+@pytest.mark.django_db
+def test_escalation_snapshot_with_deleted_channel_filter(escalation_snapshot_test_setup):
+ alert_group, notify_to_multiple_users_step, wait_step, notify_if_time_step = escalation_snapshot_test_setup
+ alert_group.channel_filter.delete()
+
+ escalation_snapshot = alert_group.escalation_snapshot
+ escalation_snapshot_dict = escalation_snapshot.convert_to_dict()
+
+ assert alert_group.raw_escalation_snapshot == escalation_snapshot_dict
+
+
+@pytest.mark.django_db
+def test_change_escalation_snapshot(escalation_snapshot_test_setup):
+ alert_group, notify_to_multiple_users_step, wait_step, notify_if_time_step = escalation_snapshot_test_setup
+
+ new_active_order = 2
+ now = timezone.now()
+ escalation_snapshot = alert_group.escalation_snapshot
+ escalation_snapshot.last_active_escalation_policy_order = new_active_order
+ escalation_snapshot.escalation_policies_snapshots[0].passed_last_time = now
+
+ escalation_snapshot.save_to_alert_group()
+    # rebuild the escalation snapshot to make sure the changes were saved
+ escalation_snapshot = alert_group.escalation_snapshot
+
+ assert escalation_snapshot.last_active_escalation_policy_order == new_active_order
+ assert escalation_snapshot.escalation_policies_snapshots[0].passed_last_time == now
+
+ assert alert_group.raw_escalation_snapshot == escalation_snapshot.convert_to_dict()
+
+
+@pytest.mark.django_db
+def test_next_escalation_policy_snapshot(escalation_snapshot_test_setup):
+ alert_group, notify_to_multiple_users_step, wait_step, notify_if_time_step = escalation_snapshot_test_setup
+ escalation_snapshot = alert_group.escalation_snapshot
+
+ assert escalation_snapshot.last_active_escalation_policy_order is None
+ assert escalation_snapshot.last_active_escalation_policy_snapshot is None
+ assert (
+ escalation_snapshot.next_active_escalation_policy_snapshot
+ is escalation_snapshot.escalation_policies_snapshots[0]
+ )
+
+ escalation_snapshot.last_active_escalation_policy_order = 0
+
+ assert escalation_snapshot.last_active_escalation_policy_order == 0
+ assert (
+ escalation_snapshot.last_active_escalation_policy_snapshot
+ is escalation_snapshot.escalation_policies_snapshots[0]
+ )
+ assert (
+ escalation_snapshot.next_active_escalation_policy_snapshot
+ is escalation_snapshot.escalation_policies_snapshots[1]
+ )
+
+ escalation_policies_snapshots_count = len(escalation_snapshot.escalation_policies_snapshots)
+ last_active_escalation_policy_order = escalation_policies_snapshots_count - 1
+ escalation_snapshot.last_active_escalation_policy_order = last_active_escalation_policy_order
+
+ assert escalation_snapshot.last_active_escalation_policy_order == last_active_escalation_policy_order
+ assert (
+ escalation_snapshot.last_active_escalation_policy_snapshot
+ is escalation_snapshot.escalation_policies_snapshots[-1]
+ )
+ assert escalation_snapshot.next_active_escalation_policy_snapshot is None
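+
+
+# Index arithmetic matching the assertions above, written out as a hypothetical helper
+# (this is not the EscalationSnapshot API): when no policy has run yet the order is
+# None and the first policy is next; past the last policy, next is None.
+def _sketch_next_policy(policies_snapshots, last_active_order):
+    next_index = 0 if last_active_order is None else last_active_order + 1
+    return policies_snapshots[next_index] if next_index < len(policies_snapshots) else None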
diff --git a/engine/apps/alerts/tests/test_incident_log_builder.py b/engine/apps/alerts/tests/test_incident_log_builder.py
new file mode 100644
index 0000000000..84e096b86f
--- /dev/null
+++ b/engine/apps/alerts/tests/test_incident_log_builder.py
@@ -0,0 +1,39 @@
+import pytest
+
+from apps.alerts.incident_log_builder import IncidentLogBuilder
+from apps.alerts.models import EscalationPolicy
+from apps.base.models import UserNotificationPolicy
+
+
+@pytest.mark.django_db
+def test_escalation_plan_messaging_backends(
+ make_organization_and_user,
+ make_user_notification_policy,
+ make_escalation_chain,
+ make_escalation_policy,
+ make_channel_filter,
+ make_alert_receive_channel,
+ make_alert_group,
+):
+ organization, user = make_organization_and_user()
+ make_user_notification_policy(
+ user,
+ UserNotificationPolicy.Step.NOTIFY,
+ notify_by=UserNotificationPolicy.NotificationChannel.TESTONLY,
+ )
+ escalation_chain = make_escalation_chain(organization=organization)
+ escalation_policy = make_escalation_policy(
+ escalation_chain=escalation_chain,
+ escalation_policy_step=EscalationPolicy.STEP_NOTIFY_USERS_QUEUE,
+ last_notified_user=user,
+ )
+ escalation_policy.notify_to_users_queue.set([user])
+ alert_receive_channel = make_alert_receive_channel(organization=organization)
+ channel_filter = make_channel_filter(alert_receive_channel, escalation_chain=escalation_chain)
+ alert_group = make_alert_group(alert_receive_channel, channel_filter=channel_filter)
+ alert_group.raw_escalation_snapshot = alert_group.build_raw_escalation_snapshot()
+ alert_group.save()
+
+ log_builder = IncidentLogBuilder(alert_group=alert_group)
+ plan = log_builder.get_incident_escalation_plan()
+ assert list(plan.values()) == [["send test only backend message to {}".format(user.username)]]
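+
+
+# Shape note, inferred from the assertion above rather than from IncidentLogBuilder
+# itself: the escalation plan appears to map an ETA to a list of human-readable action
+# lines. A hypothetical consumer could flatten it like this:
+def _sketch_flatten_escalation_plan(plan):
+    return [line for lines in plan.values() for line in lines]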
diff --git a/engine/apps/alerts/tests/test_maintenance.py b/engine/apps/alerts/tests/test_maintenance.py
new file mode 100644
index 0000000000..61e08f056d
--- /dev/null
+++ b/engine/apps/alerts/tests/test_maintenance.py
@@ -0,0 +1,220 @@
+import pytest
+
+from apps.alerts.models import AlertGroup, AlertReceiveChannel
+from apps.alerts.tasks import disable_maintenance
+from common.exceptions import MaintenanceCouldNotBeStartedError
+
+
+@pytest.fixture()
+def maintenance_test_setup(
+ make_organization_and_user,
+ make_escalation_chain,
+):
+ organization, user = make_organization_and_user()
+ make_escalation_chain(organization)
+ return organization, user
+
+
+@pytest.mark.django_db
+def test_start_maintenance_integration(
+ maintenance_test_setup, make_alert_receive_channel, mock_start_disable_maintenance_task
+):
+ organization, user = maintenance_test_setup
+
+ alert_receive_channel = make_alert_receive_channel(
+ organization, integration=AlertReceiveChannel.INTEGRATION_GRAFANA
+ )
+ mode = AlertReceiveChannel.MAINTENANCE
+ duration = AlertReceiveChannel.DURATION_ONE_HOUR.seconds
+
+ alert_receive_channel.start_maintenance(mode, duration, user)
+
+ assert alert_receive_channel.maintenance_mode == mode
+ assert alert_receive_channel.maintenance_duration == AlertReceiveChannel.DURATION_ONE_HOUR
+ assert alert_receive_channel.maintenance_uuid is not None
+ assert alert_receive_channel.maintenance_started_at is not None
+ assert alert_receive_channel.maintenance_author == user
+
+
+@pytest.mark.django_db
+def test_start_maintenance_integration_multiple_previous_instances(
+ maintenance_test_setup, make_alert_receive_channel, mock_start_disable_maintenance_task
+):
+ organization, user = maintenance_test_setup
+
+ alert_receive_channel = make_alert_receive_channel(
+ organization, integration=AlertReceiveChannel.INTEGRATION_GRAFANA
+ )
+ # 2 maintenance integrations were created in the past
+    for _ in range(2):
+ AlertReceiveChannel.create(organization=organization, integration=AlertReceiveChannel.INTEGRATION_MAINTENANCE)
+
+ mode = AlertReceiveChannel.MAINTENANCE
+ duration = AlertReceiveChannel.DURATION_ONE_HOUR.seconds
+
+ alert_receive_channel.start_maintenance(mode, duration, user)
+
+ assert alert_receive_channel.maintenance_mode == mode
+ assert alert_receive_channel.maintenance_duration == AlertReceiveChannel.DURATION_ONE_HOUR
+ assert alert_receive_channel.maintenance_uuid is not None
+ assert alert_receive_channel.maintenance_started_at is not None
+ assert alert_receive_channel.maintenance_author == user
+
+
+@pytest.mark.django_db
+def test_maintenance_integration_will_not_start_twice(
+ maintenance_test_setup, make_alert_receive_channel, mock_start_disable_maintenance_task
+):
+ organization, user = maintenance_test_setup
+
+ alert_receive_channel = make_alert_receive_channel(
+ organization, integration=AlertReceiveChannel.INTEGRATION_GRAFANA
+ )
+ mode = AlertReceiveChannel.MAINTENANCE
+ duration = AlertReceiveChannel.DURATION_ONE_HOUR.seconds
+
+ alert_receive_channel.start_maintenance(mode, duration, user)
+ with pytest.raises(MaintenanceCouldNotBeStartedError):
+ alert_receive_channel.start_maintenance(mode, duration, user)
+
+ assert alert_receive_channel.maintenance_mode == mode
+ assert alert_receive_channel.maintenance_duration == AlertReceiveChannel.DURATION_ONE_HOUR
+ assert alert_receive_channel.maintenance_uuid is not None
+ assert alert_receive_channel.maintenance_started_at is not None
+ assert alert_receive_channel.maintenance_author == user
+
+
+@pytest.mark.django_db
+def test_start_maintenance_team(maintenance_test_setup, mock_start_disable_maintenance_task):
+ organization, user = maintenance_test_setup
+
+ mode = AlertReceiveChannel.MAINTENANCE
+ duration = AlertReceiveChannel.DURATION_ONE_HOUR.seconds
+
+ organization.start_maintenance(mode, duration, user)
+
+ assert organization.maintenance_mode == mode
+ assert organization.maintenance_duration == AlertReceiveChannel.DURATION_ONE_HOUR
+ assert organization.maintenance_uuid is not None
+ assert organization.maintenance_started_at is not None
+ assert organization.maintenance_author == user
+
+
+@pytest.mark.django_db
+def test_maintenance_team_will_not_start_twice(maintenance_test_setup, mock_start_disable_maintenance_task):
+ organization, user = maintenance_test_setup
+
+ mode = AlertReceiveChannel.MAINTENANCE
+ duration = AlertReceiveChannel.DURATION_ONE_HOUR.seconds
+
+ organization.start_maintenance(mode, duration, user)
+ with pytest.raises(MaintenanceCouldNotBeStartedError):
+ organization.start_maintenance(mode, duration, user)
+
+ assert organization.maintenance_mode == mode
+ assert organization.maintenance_duration == AlertReceiveChannel.DURATION_ONE_HOUR
+ assert organization.maintenance_uuid is not None
+ assert organization.maintenance_started_at is not None
+ assert organization.maintenance_author == user
+
+
+@pytest.mark.django_db
+def test_alert_attached_to_maintenance_incident_integration(
+ maintenance_test_setup,
+ make_alert_receive_channel,
+ make_alert_with_custom_create_method,
+ mock_start_disable_maintenance_task,
+):
+ organization, user = maintenance_test_setup
+
+ alert_receive_channel = make_alert_receive_channel(
+ organization, integration=AlertReceiveChannel.INTEGRATION_GRAFANA
+ )
+ mode = AlertReceiveChannel.MAINTENANCE
+ duration = AlertReceiveChannel.DURATION_ONE_HOUR.seconds
+
+ alert_receive_channel.start_maintenance(mode, duration, user)
+ maintenance_incident = AlertGroup.all_objects.get(maintenance_uuid=alert_receive_channel.maintenance_uuid)
+
+ alert = make_alert_with_custom_create_method(
+ title="test_title",
+ message="test_message",
+ image_url="test_img_url",
+ link_to_upstream_details=None,
+ alert_receive_channel=alert_receive_channel,
+ raw_request_data={"message": "test"},
+ integration_unique_data={},
+ )
+
+ assert alert.group.root_alert_group == maintenance_incident
+
+
+@pytest.mark.django_db
+def test_alert_attached_to_maintenance_incident_team(
+ maintenance_test_setup,
+ make_alert_receive_channel,
+ make_alert_with_custom_create_method,
+ mock_start_disable_maintenance_task,
+):
+ organization, user = maintenance_test_setup
+
+ alert_receive_channel = make_alert_receive_channel(
+ organization, integration=AlertReceiveChannel.INTEGRATION_GRAFANA
+ )
+
+ mode = AlertReceiveChannel.MAINTENANCE
+ duration = AlertReceiveChannel.DURATION_ONE_HOUR.seconds
+
+ organization.start_maintenance(mode, duration, user)
+ maintenance_incident = AlertGroup.all_objects.get(maintenance_uuid=organization.maintenance_uuid)
+
+ alert = make_alert_with_custom_create_method(
+ title="test_title",
+ message="test_message",
+ image_url="test_img_url",
+ link_to_upstream_details=None,
+ alert_receive_channel=alert_receive_channel,
+ raw_request_data={"message": "test"},
+ integration_unique_data={},
+ )
+
+ assert alert.group.root_alert_group == maintenance_incident
+
+
+@pytest.mark.django_db(transaction=True)
+def test_stop_maintenance(
+ maintenance_test_setup,
+ make_alert_receive_channel,
+ make_alert_with_custom_create_method,
+ mock_start_disable_maintenance_task,
+):
+ organization, user = maintenance_test_setup
+ alert_receive_channel = make_alert_receive_channel(
+ organization, integration=AlertReceiveChannel.INTEGRATION_GRAFANA
+ )
+ mode = AlertReceiveChannel.MAINTENANCE
+ duration = AlertReceiveChannel.DURATION_ONE_HOUR.seconds
+
+ alert_receive_channel.start_maintenance(mode, duration, user)
+ maintenance_incident = AlertGroup.all_objects.get(maintenance_uuid=alert_receive_channel.maintenance_uuid)
+ alert = make_alert_with_custom_create_method(
+ title="test_title",
+ message="test_message",
+ image_url="test_img_url",
+ link_to_upstream_details=None,
+ alert_receive_channel=alert_receive_channel,
+ raw_request_data={"message": "test"},
+ integration_unique_data={},
+ )
+
+ disable_maintenance(alert_receive_channel_id=alert_receive_channel.pk, force=True)
+ maintenance_incident.refresh_from_db()
+ alert.refresh_from_db()
+ assert maintenance_incident.resolved_by == AlertGroup.DISABLE_MAINTENANCE
+ assert alert.group.resolved_by == AlertGroup.DISABLE_MAINTENANCE
+
+ assert organization.maintenance_mode is None
+ assert organization.maintenance_duration is None
+ assert organization.maintenance_uuid is None
+ assert organization.maintenance_started_at is None
+ assert organization.maintenance_author is None
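+
+
+# Hedged sketch of the double-start guard these tests rely on: starting maintenance on
+# an object that is already in maintenance raises MaintenanceCouldNotBeStartedError.
+# This outline is illustrative only, not the actual model method; uuid, duration and
+# started_at bookkeeping is deliberately omitted.
+def _sketch_start_maintenance(target, mode, duration, user):
+    if target.maintenance_mode is not None:
+        # both integrations and organizations appear to refuse a second concurrent window
+        raise MaintenanceCouldNotBeStartedError("Already in maintenance")
+    target.maintenance_mode = mode
+    target.maintenance_author = user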
diff --git a/engine/apps/alerts/tests/test_notify_ical_schedule_shift.py b/engine/apps/alerts/tests/test_notify_ical_schedule_shift.py
new file mode 100644
index 0000000000..d6cb6398c4
--- /dev/null
+++ b/engine/apps/alerts/tests/test_notify_ical_schedule_shift.py
@@ -0,0 +1,63 @@
+import pytest
+
+from apps.alerts.tasks.notify_ical_schedule_shift import notify_ical_schedule_shift
+from apps.schedules.models import OnCallScheduleICal
+
+ICAL_DATA = """
+BEGIN:VCALENDAR
+PRODID:-//Google Inc//Google Calendar 70.9054//EN
+VERSION:2.0
+CALSCALE:GREGORIAN
+METHOD:PUBLISH
+X-WR-CALNAME:t
+X-WR-TIMEZONE:Asia/Yekaterinburg
+BEGIN:VTIMEZONE
+TZID:Asia/Yekaterinburg
+X-LIC-LOCATION:Asia/Yekaterinburg
+BEGIN:STANDARD
+TZOFFSETFROM:+0500
+TZOFFSETTO:+0500
+TZNAME:+05
+DTSTART:19700101T000000
+END:STANDARD
+END:VTIMEZONE
+BEGIN:VEVENT
+DTSTART;TZID=Asia/Yekaterinburg:20210124T130000
+DTEND;TZID=Asia/Yekaterinburg:20210124T220000
+RRULE:FREQ=DAILY
+DTSTAMP:20210127T143634Z
+UID:0i0af8p6p8vfampe3r1vkog0jg@google.com
+CREATED:20210127T143553Z
+DESCRIPTION:
+LAST-MODIFIED:20210127T143553Z
+LOCATION:
+SEQUENCE:0
+STATUS:CONFIRMED
+SUMMARY:@Bernard Desruisseaux
+TRANSP:OPAQUE
+END:VEVENT
+END:VCALENDAR
+"""
+
+
+@pytest.mark.django_db
+def test_current_overrides_ical_schedule_is_none(
+ make_organization_and_user_with_slack_identities,
+ make_schedule,
+):
+ organization, _, _, _ = make_organization_and_user_with_slack_identities()
+
+ ical_schedule = make_schedule(
+ organization,
+ schedule_class=OnCallScheduleICal,
+ name="test_ical_schedule",
+ channel="channel",
+ ical_url_primary="url",
+ prev_ical_file_primary=ICAL_DATA,
+ cached_ical_file_primary=ICAL_DATA,
+ prev_ical_file_overrides=ICAL_DATA,
+ cached_ical_file_overrides=None,
+ )
+
+ # this should not raise
+ notify_ical_schedule_shift(ical_schedule.oncallschedule_ptr_id)
diff --git a/engine/apps/alerts/tests/test_notify_user.py b/engine/apps/alerts/tests/test_notify_user.py
new file mode 100644
index 0000000000..0667754430
--- /dev/null
+++ b/engine/apps/alerts/tests/test_notify_user.py
@@ -0,0 +1,120 @@
+from unittest.mock import patch
+
+import pytest
+
+from apps.alerts.tasks.notify_user import perform_notification
+from apps.base.models.user_notification_policy import UserNotificationPolicy
+from apps.base.models.user_notification_policy_log_record import UserNotificationPolicyLogRecord
+
+
+@pytest.mark.django_db
+def test_custom_backend_call(
+ make_organization,
+ make_user,
+ make_user_notification_policy,
+ make_alert_receive_channel,
+ make_alert_group,
+ make_user_notification_policy_log_record,
+):
+ organization = make_organization()
+ user_1 = make_user(organization=organization)
+ user_notification_policy = make_user_notification_policy(
+ user=user_1,
+ step=UserNotificationPolicy.Step.NOTIFY,
+ notify_by=UserNotificationPolicy.NotificationChannel.TESTONLY,
+ )
+ alert_receive_channel = make_alert_receive_channel(organization=organization)
+ alert_group = make_alert_group(alert_receive_channel=alert_receive_channel)
+ log_record = make_user_notification_policy_log_record(
+ author=user_1,
+ alert_group=alert_group,
+ notification_policy=user_notification_policy,
+ type=UserNotificationPolicyLogRecord.TYPE_PERSONAL_NOTIFICATION_TRIGGERED,
+ )
+
+ with patch("apps.base.tests.messaging_backend.TestOnlyBackend.notify_user") as mock_notify_user:
+ perform_notification(log_record.pk)
+
+ mock_notify_user.assert_called_once_with(user_1, alert_group, user_notification_policy)
+
+
+@pytest.mark.django_db
+def test_custom_backend_error(
+ make_organization,
+ make_user,
+ make_user_notification_policy,
+ make_alert_receive_channel,
+ make_alert_group,
+ make_user_notification_policy_log_record,
+):
+ organization = make_organization()
+ user_1 = make_user(organization=organization)
+ user_notification_policy = make_user_notification_policy(
+ user=user_1,
+ step=UserNotificationPolicy.Step.NOTIFY,
+ notify_by=UserNotificationPolicy.NotificationChannel.TESTONLY,
+ )
+ alert_receive_channel = make_alert_receive_channel(organization=organization)
+ alert_group = make_alert_group(alert_receive_channel=alert_receive_channel)
+ log_record = make_user_notification_policy_log_record(
+ author=user_1,
+ alert_group=alert_group,
+ notification_policy=user_notification_policy,
+ type=UserNotificationPolicyLogRecord.TYPE_PERSONAL_NOTIFICATION_TRIGGERED,
+ )
+
+ with patch("apps.alerts.tasks.notify_user.get_messaging_backend_from_id") as mock_get_backend:
+ mock_get_backend.return_value = None
+ perform_notification(log_record.pk)
+
+ error_log_record = UserNotificationPolicyLogRecord.objects.last()
+ assert error_log_record.type == UserNotificationPolicyLogRecord.TYPE_PERSONAL_NOTIFICATION_FAILED
+ assert error_log_record.reason == "Messaging backend not available"
+ assert (
+ error_log_record.notification_error_code
+ == UserNotificationPolicyLogRecord.ERROR_NOTIFICATION_MESSAGING_BACKEND_ERROR
+ )
+
+
+@pytest.mark.django_db
+@pytest.mark.parametrize(
+ "author_set,notification_policy_set",
+ [
+ (False, True),
+ (True, False),
+ ],
+)
+def test_notify_user_missing_data_errors(
+ make_organization,
+ make_user,
+ make_user_notification_policy,
+ make_alert_receive_channel,
+ make_alert_group,
+ make_user_notification_policy_log_record,
+ author_set,
+ notification_policy_set,
+):
+ organization = make_organization()
+ user_1 = make_user(organization=organization)
+ user_notification_policy = make_user_notification_policy(
+ user=user_1,
+ step=UserNotificationPolicy.Step.NOTIFY,
+ notify_by=UserNotificationPolicy.NotificationChannel.SMS,
+ )
+ alert_receive_channel = make_alert_receive_channel(organization=organization)
+ alert_group = make_alert_group(alert_receive_channel=alert_receive_channel)
+ log_record = make_user_notification_policy_log_record(
+ author=user_1 if author_set else None,
+ alert_group=alert_group,
+ notification_policy=user_notification_policy if notification_policy_set else None,
+ type=UserNotificationPolicyLogRecord.TYPE_PERSONAL_NOTIFICATION_TRIGGERED,
+ )
+
+ with patch("apps.alerts.tasks.notify_user.get_messaging_backend_from_id") as mock_get_backend:
+ mock_get_backend.return_value = None
+ perform_notification(log_record.pk)
+
+ error_log_record = UserNotificationPolicyLogRecord.objects.last()
+ assert error_log_record.type == UserNotificationPolicyLogRecord.TYPE_PERSONAL_NOTIFICATION_FAILED
+ assert error_log_record.reason == "Expected data is missing"
+ assert error_log_record.notification_error_code is None
diff --git a/engine/apps/alerts/tests/test_representative.py b/engine/apps/alerts/tests/test_representative.py
new file mode 100644
index 0000000000..b1cf8f8648
--- /dev/null
+++ b/engine/apps/alerts/tests/test_representative.py
@@ -0,0 +1,45 @@
+import pytest
+
+from apps.alerts.models import AlertGroupLogRecord, AlertReceiveChannel
+from apps.slack.representatives.alert_group_representative import AlertGroupSlackRepresentative
+
+
+@pytest.mark.django_db
+def test_handler_escalation_triggered(
+ make_organization_and_user,
+ make_alert_receive_channel,
+ make_channel_filter,
+ make_alert_group,
+ make_alert,
+ make_alert_group_log_record,
+):
+ organization, user = make_organization_and_user()
+ alert_receive_channel = make_alert_receive_channel(
+ organization, integration=AlertReceiveChannel.INTEGRATION_GRAFANA
+ )
+ channel_filter = make_channel_filter(alert_receive_channel, is_default=True)
+
+ alert_group = make_alert_group(alert_receive_channel, channel_filter=channel_filter)
+ make_alert(
+ alert_group,
+ raw_request_data={
+ "evalMatches": [
+ {"value": 100, "metric": "High value", "tags": None},
+ {"value": 200, "metric": "Higher Value", "tags": None},
+ ],
+ "message": "Someone is testing the alert notification within grafana.",
+ "ruleId": 0,
+ "ruleName": "Test notification",
+ "ruleUrl": "http://localhost:3000/",
+ "state": "alerting",
+ "title": "[Alerting] Test notification",
+ },
+ )
+
+ escalation_log_record = make_alert_group_log_record(
+ alert_group, type=AlertGroupLogRecord.TYPE_ESCALATION_TRIGGERED, author=None
+ )
+
+ representative = AlertGroupSlackRepresentative(escalation_log_record)
+ handler = representative.get_handler()
+ assert handler.__name__ == "on_handler_not_found"
diff --git a/engine/apps/alerts/tests/test_silence.py b/engine/apps/alerts/tests/test_silence.py
new file mode 100644
index 0000000000..b4e0d4fcdf
--- /dev/null
+++ b/engine/apps/alerts/tests/test_silence.py
@@ -0,0 +1,71 @@
+import pytest
+from django.utils import timezone
+
+from apps.alerts.models import AlertReceiveChannel
+
+
+@pytest.mark.django_db
+def test_silence_alert_group(
+ make_organization_and_user,
+ make_alert_receive_channel,
+ make_alert_group,
+ make_alert,
+ mock_start_disable_maintenance_task,
+):
+ organization, user = make_organization_and_user()
+ alert_receive_channel = make_alert_receive_channel(
+ organization, integration=AlertReceiveChannel.INTEGRATION_GRAFANA
+ )
+ alert_group = make_alert_group(alert_receive_channel)
+ alert_group.silence()
+
+ assert alert_group.silenced is True
+ assert alert_group.silenced_at is not None
+
+
+@pytest.mark.django_db
+def test_silence_by_user_alert_group(
+ make_organization_and_user,
+ make_alert_receive_channel,
+ make_alert_group,
+ make_alert,
+ mock_start_disable_maintenance_task,
+):
+ organization, user = make_organization_and_user()
+ alert_receive_channel = make_alert_receive_channel(
+ organization, integration=AlertReceiveChannel.INTEGRATION_GRAFANA
+ )
+ alert_group = make_alert_group(alert_receive_channel)
+ alert_group.silence()
+
+ assert alert_group.silenced is True
+ assert alert_group.silenced_at is not None
+
+
+@pytest.mark.django_db
+def test_unsilence_alert_group(
+ make_organization_and_user,
+ make_alert_receive_channel,
+ make_alert_group,
+ make_alert,
+ mock_start_disable_maintenance_task,
+):
+ organization, user = make_organization_and_user()
+ alert_receive_channel = make_alert_receive_channel(
+ organization, integration=AlertReceiveChannel.INTEGRATION_GRAFANA
+ )
+ now = timezone.now()
+ silenced_until = now + timezone.timedelta(seconds=3600)
+ alert_group = make_alert_group(
+ alert_receive_channel,
+ silenced=True,
+ silenced_at=timezone.now(),
+ silenced_by_user=user,
+ silenced_until=silenced_until,
+ )
+ alert_group.un_silence()
+
+ assert alert_group.silenced is False
+ assert alert_group.silenced_at is None
+ assert alert_group.silenced_until is None
+ assert alert_group.silenced_by_user is None
diff --git a/engine/apps/alerts/tests/test_terraform_renderer.py b/engine/apps/alerts/tests/test_terraform_renderer.py
new file mode 100644
index 0000000000..78ece24b93
--- /dev/null
+++ b/engine/apps/alerts/tests/test_terraform_renderer.py
@@ -0,0 +1,147 @@
+import pytest
+from django.utils import dateparse, timezone
+from django.utils.text import slugify
+
+from apps.alerts.models import EscalationPolicy
+from apps.alerts.terraform_renderer import TerraformFileRenderer, TerraformStateRenderer
+from apps.schedules.models import CustomOnCallShift, OnCallScheduleCalendar
+
+terraform_file_renderer_data = {
+ "filtering_term": "\\[test\\]",
+ "escaped_filtering_term": "\\\\[test\\\\]",
+}
+
+rendered_terraform_file_template = """
+data "amixr_user" "{user_name}" {{
+ username = "{user_name}"
+}}
+
+resource "amixr_escalation_chain" "{escalation_chain_name}" {{
+ name = "{escalation_chain_name}"
+ team_id = null
+}}
+
+resource "amixr_escalation" "escalation-1-{escalation_chain_name}" {{
+ escalation_chain_id = amixr_escalation_chain.{escalation_chain_name}.id
+ type = "notify_persons"
+ important = false
+ persons_to_notify = [
+ data.amixr_user.{user_name}.id
+ ]
+ position = 0
+}}
+
+resource "amixr_integration" "{integration_name}" {{
+ name = "{integration_verbal_name}"
+ type = "grafana"
+ team_id = null
+}}
+
+resource "amixr_on_call_shift" "{shift_name}" {{
+ name = "{shift_name}"
+ type = "rolling_users"
+ team_id = null
+ start = "2021-08-16T17:00:00"
+ duration = 3600
+ level = 0
+ frequency = "weekly"
+ interval = 1
+ week_start = "MO"
+ by_day = ["MO", "SA"]
+ by_month = null
+ by_monthday = null
+ rolling_users = [
+ [data.amixr_user.{user_name}.id],
+ ]
+}}
+
+resource "amixr_schedule" "{schedule_name}" {{
+ name = "{schedule_name}"
+ type = "calendar"
+ team_id = null
+ time_zone = "UTC"
+}}
+"""
+
+rendered_terraform_imports_template = """terraform import amixr_escalation_chain.{escalation_chain_name} {escalation_chain_public_primary_key}
+terraform import amixr_escalation.escalation-1-{escalation_chain_name} {escalation_1_public_primary_key}
+terraform import amixr_integration.{integration_name} {integration_public_primary_key}
+"""
+
+
+@pytest.mark.django_db
+def test_render_terraform_file(
+ make_organization_and_user_with_slack_identities,
+ make_integration_escalation_chain_route_escalation_policy,
+ make_escalation_chain,
+ make_escalation_policy,
+ make_on_call_shift,
+ make_schedule,
+):
+ organization, user, _, _ = make_organization_and_user_with_slack_identities()
+ (integration, escalation_chain, _, escalation_policy) = make_integration_escalation_chain_route_escalation_policy(
+ organization,
+ EscalationPolicy.STEP_NOTIFY_MULTIPLE_USERS,
+ )
+ escalation_policy.notify_to_users_queue.add(user)
+
+ schedule = make_schedule(
+ organization=organization,
+ schedule_class=OnCallScheduleCalendar,
+ name="test_calendar_schedule",
+ )
+
+ shift = make_on_call_shift(
+ organization=organization,
+ name="test_shift",
+ shift_type=CustomOnCallShift.TYPE_ROLLING_USERS_EVENT,
+ frequency=CustomOnCallShift.FREQUENCY_WEEKLY,
+ interval=1,
+ week_start=CustomOnCallShift.MONDAY,
+ start=dateparse.parse_datetime("2021-08-16T17:00:00"),
+ duration=timezone.timedelta(seconds=3600),
+ by_day=["MO", "SA"],
+ rolling_users=[{user.pk: user.public_primary_key}],
+ )
+
+ renderer = TerraformFileRenderer(organization)
+ result = renderer.render_terraform_file()
+
+ expected_result = rendered_terraform_file_template.format(
+ user_name=slugify(user.username),
+ escalation_chain_name=escalation_chain.name,
+ integration_name=slugify(integration.verbal_name),
+ integration_verbal_name=integration.verbal_name,
+ routing_regex=terraform_file_renderer_data["escaped_filtering_term"],
+ schedule_name=schedule.name,
+ shift_name=shift.name,
+ )
+
+ assert result == expected_result
+
+
+@pytest.mark.django_db
+def test_render_terraform_imports(
+ make_organization_and_user_with_slack_identities,
+ make_integration_escalation_chain_route_escalation_policy,
+ make_escalation_chain,
+ make_escalation_policy,
+):
+ organization, user, _, _ = make_organization_and_user_with_slack_identities()
+ integration, escalation_chain, _, escalation_policy = make_integration_escalation_chain_route_escalation_policy(
+ organization,
+ EscalationPolicy.STEP_NOTIFY_MULTIPLE_USERS,
+ )
+
+ renderer = TerraformStateRenderer(organization)
+ result = renderer.render_state()
+
+ expected_result = rendered_terraform_imports_template.format(
+ escalation_chain_name=escalation_chain.name,
+ escalation_chain_public_primary_key=escalation_chain.public_primary_key,
+ integration_name=slugify(integration.verbal_name),
+ integration_public_primary_key=integration.public_primary_key,
+ escalation_1_public_primary_key=escalation_policy.public_primary_key,
+ )
+
+ assert result == expected_result
diff --git a/engine/apps/alerts/tests/test_utils.py b/engine/apps/alerts/tests/test_utils.py
new file mode 100644
index 0000000000..ff19018a08
--- /dev/null
+++ b/engine/apps/alerts/tests/test_utils.py
@@ -0,0 +1,14 @@
+import socket
+from unittest.mock import patch
+
+import pytest
+
+from apps.alerts.utils import request_outgoing_webhook
+
+
+@pytest.mark.django_db
+def test_request_outgoing_webhook_cannot_resolve_name():
+ with patch("apps.alerts.utils.socket.gethostbyname", side_effect=socket.gaierror):
+ success, err = request_outgoing_webhook("http://something.something/webhook", "GET")
+ assert success is False
+ assert err == "Cannot resolve name in url"
diff --git a/engine/apps/alerts/tests/test_wipe.py b/engine/apps/alerts/tests/test_wipe.py
new file mode 100644
index 0000000000..218ef0ed72
--- /dev/null
+++ b/engine/apps/alerts/tests/test_wipe.py
@@ -0,0 +1,20 @@
+import pytest
+
+from apps.alerts.tasks.wipe import wipe
+
+
+@pytest.mark.django_db
+def test_wipe_alert_group(
+ make_organization_and_user,
+ make_alert_receive_channel,
+ make_alert_group,
+):
+ organization, user = make_organization_and_user()
+ alert_receive_channel = make_alert_receive_channel(organization)
+ alert_group = make_alert_group(alert_receive_channel)
+
+ wipe(alert_group.pk, user.pk)
+
+ alert_group.refresh_from_db()
+ assert alert_group.wiped_at is not None
+ assert alert_group.wiped_by == user
diff --git a/engine/apps/alerts/utils.py b/engine/apps/alerts/utils.py
new file mode 100644
index 0000000000..fc757b6bb9
--- /dev/null
+++ b/engine/apps/alerts/utils.py
@@ -0,0 +1,91 @@
+import ipaddress
+import json
+import socket
+from typing import Tuple
+from urllib.parse import urlparse
+
+import requests
+
+OUTGOING_WEBHOOK_TIMEOUT = 10
+
+
+def render_relative_timeline(log_created_at, alert_group_started_at):
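+    # Renders the time elapsed between two datetimes in a compact form;
+    # e.g. a delta of 90061 seconds renders as "1d1h1m1s".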
+ time_delta = log_created_at - alert_group_started_at
+ seconds = int(time_delta.total_seconds())
+ days, seconds = divmod(seconds, 86400)
+ hours, seconds = divmod(seconds, 3600)
+ minutes, seconds = divmod(seconds, 60)
+ if days > 0:
+ return "%dd%dh%dm%ds" % (days, hours, minutes, seconds)
+ elif hours > 0:
+ return "%dh%dm%ds" % (hours, minutes, seconds)
+ elif minutes > 0:
+ return "%dm%ds" % (minutes, seconds)
+ else:
+ return "%ds" % (seconds,)
+
+
+def render_curl_command(webhook_url, http_request_type, post_kwargs):
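+    # Builds a human-readable curl representation of the outgoing request,
+    # masking auth credentials and header values with "****".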
+ if http_request_type == "POST":
+ curl_request = "curl -X POST"
+ if "auth" in post_kwargs:
+ curl_request += "\n-u ****"
+ if "headers" in post_kwargs:
+ curl_request += "\n-H ****"
+ if "json" in post_kwargs:
+ curl_request += "\n-d '{}'".format(json.dumps(post_kwargs["json"], indent=2, sort_keys=True))
+ curl_request += "\n{}".format(webhook_url)
+ elif http_request_type == "GET":
+ curl_request = f"curl -X GET {webhook_url}"
+ else:
+ raise Exception("Unsupported http method")
+ return curl_request
+
+
+def request_outgoing_webhook(webhook_url, http_request_type, post_kwargs=None) -> Tuple[bool, str]:
+    if post_kwargs is None:  # avoid a shared mutable default argument
+        post_kwargs = {}
+    if http_request_type not in ["POST", "GET"]:
+        raise Exception(f"Wrong http_method parameter: {http_request_type}")
+
+ parsed_url = urlparse(webhook_url)
+    # ensure the value actually looks like a URL
+ if parsed_url.scheme not in ["http", "https"]:
+ return False, "Malformed url"
+ if not parsed_url.netloc:
+ return False, "Malformed url"
+    # Resolve the webhook host and reject private network addresses (basic SSRF guard).
+    # parsed_url.hostname is used instead of netloc so URLs with an explicit port resolve too.
+    try:
+        webhook_url_ip_address = socket.gethostbyname(parsed_url.hostname)
+    except socket.gaierror:
+        return False, "Cannot resolve name in url"
+    if ipaddress.ip_address(webhook_url_ip_address).is_private:
+        return False, "This url is not supported for outgoing webhooks"
+
+ try:
+ if http_request_type == "POST":
+ r = requests.post(webhook_url, timeout=OUTGOING_WEBHOOK_TIMEOUT, **post_kwargs)
+ elif http_request_type == "GET":
+ r = requests.get(webhook_url, timeout=OUTGOING_WEBHOOK_TIMEOUT)
+ else:
+ raise Exception()
+ r.raise_for_status()
+ return True, "OK 200"
+ except requests.exceptions.HTTPError:
+ return False, "HTTP error {}".format(r.status_code)
+ except requests.exceptions.SSLError:
+ return False, "ssl certificate error"
+ except requests.exceptions.ConnectionError:
+ return False, "Connection error happened. Probably that's because of network or proxy."
+ except requests.exceptions.MissingSchema:
+ return False, "Url {} is incorrect. http:// or https:// might be missing.".format(webhook_url)
+ except requests.exceptions.ChunkedEncodingError:
+ return False, "File content or headers might be wrong."
+ except requests.exceptions.InvalidURL:
+ return False, "Url {} is incorrect".format(webhook_url)
+ except requests.exceptions.TooManyRedirects:
+ return False, "Multiple redirects happened. That's suspicious!"
+ except requests.exceptions.Timeout:
+ return False, f"Request timeout {OUTGOING_WEBHOOK_TIMEOUT} secs exceeded."
+    except requests.exceptions.RequestException:
+ return False, "Failed to call outgoing webhook"
+ except Exception:
+ return False, "Failed to call outgoing webhook"
diff --git a/engine/apps/api/__init__.py b/engine/apps/api/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/engine/apps/api/permissions/__init__.py b/engine/apps/api/permissions/__init__.py
new file mode 100644
index 0000000000..d3f1d47e70
--- /dev/null
+++ b/engine/apps/api/permissions/__init__.py
@@ -0,0 +1,5 @@
+from .actions import ActionPermission # noqa: F401
+from .constants import ALL_BASE_ACTIONS, MODIFY_ACTIONS, READ_ACTIONS # noqa: F401
+from .methods import MethodPermission # noqa: F401
+from .owner import IsOwner, IsOwnerOrAdmin, IsOwnerOrAdminOrEditor # noqa: F401
+from .roles import AnyRole, IsAdmin, IsAdminOrEditor, IsEditor, IsStaff, IsViewer # noqa: F401
diff --git a/engine/apps/api/permissions/actions.py b/engine/apps/api/permissions/actions.py
new file mode 100644
index 0000000000..74136e12ee
--- /dev/null
+++ b/engine/apps/api/permissions/actions.py
@@ -0,0 +1,27 @@
+from typing import Any
+
+from rest_framework import permissions
+from rest_framework.request import Request
+from rest_framework.viewsets import ViewSet
+
+
+class ActionPermission(permissions.BasePermission):
+ def has_permission(self, request: Request, view: ViewSet) -> bool:
+ for permission, actions in getattr(view, "action_permissions", {}).items():
+ if view.action in actions:
+ return permission().has_permission(request, view)
+
+ return False
+
+ def has_object_permission(self, request: Request, view: ViewSet, obj: Any) -> bool:
+        # The action_object_permissions attr should be used when the permission check
+        # requires a lookup of some of the object's properties, e.g. its team.
+ if getattr(view, "action_object_permissions", None):
+ for permission, actions in getattr(view, "action_object_permissions", {}).items():
+ if view.action in actions:
+ return permission().has_object_permission(request, view, obj)
+ return False
+ else:
+            # has_object_permission is called after has_permission, so return True if the view
+            # has no action_object_permissions attr, which means no additional object-level check is required
+ return True
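+
+
+# A minimal usage sketch (illustrative; MyViewSet is a placeholder): a ViewSet opts in
+# by declaring a permission -> actions mapping, e.g.
+#
+#     from apps.api.permissions import ActionPermission, AnyRole, IsAdmin, MODIFY_ACTIONS, READ_ACTIONS
+#
+#     class MyViewSet(viewsets.ModelViewSet):
+#         permission_classes = (ActionPermission,)
+#         action_permissions = {
+#             IsAdmin: MODIFY_ACTIONS,
+#             AnyRole: READ_ACTIONS,
+#         }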
diff --git a/engine/apps/api/permissions/constants.py b/engine/apps/api/permissions/constants.py
new file mode 100644
index 0000000000..29e828ce1d
--- /dev/null
+++ b/engine/apps/api/permissions/constants.py
@@ -0,0 +1,14 @@
+READ_ACTIONS = (
+ "list",
+ "retrieve",
+ "metadata",
+)
+
+MODIFY_ACTIONS = (
+ "create",
+ "update",
+ "partial_update",
+ "destroy",
+)
+
+ALL_BASE_ACTIONS = READ_ACTIONS + MODIFY_ACTIONS
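+
+# Note: these mirror DRF's built-in ViewSet actions ("metadata" is the OPTIONS handler).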
diff --git a/engine/apps/api/permissions/methods.py b/engine/apps/api/permissions/methods.py
new file mode 100644
index 0000000000..6ff1b11093
--- /dev/null
+++ b/engine/apps/api/permissions/methods.py
@@ -0,0 +1,12 @@
+from rest_framework import permissions
+from rest_framework.request import Request
+from rest_framework.viewsets import ViewSet
+
+
+class MethodPermission(permissions.BasePermission):
+ def has_permission(self, request: Request, view: ViewSet) -> bool:
+ for permission, methods in getattr(view, "method_permissions", {}).items():
+ if request.method in methods:
+ return permission().has_permission(request, view)
+
+ return False
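+
+
+# A minimal usage sketch (illustrative; MyView is a placeholder): views declare a
+# permission -> HTTP methods mapping, analogous to ActionPermission's action mapping.
+#
+#     class MyView(APIView):
+#         permission_classes = (MethodPermission,)
+#         method_permissions = {
+#             IsAdmin: ("POST", "PUT", "DELETE"),
+#             AnyRole: ("GET",),
+#         }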
diff --git a/engine/apps/api/permissions/owner.py b/engine/apps/api/permissions/owner.py
new file mode 100644
index 0000000000..4a4fc69e34
--- /dev/null
+++ b/engine/apps/api/permissions/owner.py
@@ -0,0 +1,24 @@
+from typing import Any
+
+from rest_framework import permissions
+from rest_framework.request import Request
+from rest_framework.viewsets import ViewSet
+
+from apps.api.permissions.roles import IsAdmin, IsEditor
+from common.utils import getattrd
+
+
+class IsOwner(permissions.BasePermission):
+ def has_object_permission(self, request: Request, view: ViewSet, obj: Any) -> bool:
+ ownership_field = getattr(view, "ownership_field", None)
+ if ownership_field is None:
+ owner = obj
+ else:
+ owner = getattrd(obj, ownership_field)
+
+ return owner == request.user
+
+
+IsOwnerOrAdmin = IsOwner | IsAdmin
+
+IsOwnerOrAdminOrEditor = IsOwner | IsAdmin | IsEditor
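+
+
+# A minimal usage sketch (illustrative; the view and field are placeholders): a view sets
+# ownership_field to an attribute path resolved via getattrd, e.g.
+#
+#     class NotificationPolicyViewSet(viewsets.ModelViewSet):
+#         permission_classes = (IsOwnerOrAdmin,)
+#         ownership_field = "user"  # obj.user must equal request.user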
diff --git a/engine/apps/api/permissions/roles.py b/engine/apps/api/permissions/roles.py
new file mode 100644
index 0000000000..3ae9d5486b
--- /dev/null
+++ b/engine/apps/api/permissions/roles.py
@@ -0,0 +1,49 @@
+from typing import Any
+
+from rest_framework import permissions
+from rest_framework.authentication import BasicAuthentication, SessionAuthentication
+from rest_framework.request import Request
+from rest_framework.viewsets import ViewSet
+
+from common.constants.role import Role
+
+
+class RolePermission(permissions.BasePermission):
+ ROLE = None
+
+ def has_permission(self, request: Request, view: ViewSet) -> bool:
+ return request.user.role == type(self).ROLE
+
+ def has_object_permission(self, request: Request, view: ViewSet, obj: Any) -> bool:
+ return self.has_permission(request, view)
+
+
+class IsAdmin(RolePermission):
+ ROLE = Role.ADMIN
+
+
+class IsEditor(RolePermission):
+ ROLE = Role.EDITOR
+
+
+class IsViewer(RolePermission):
+ ROLE = Role.VIEWER
+
+
+IsAdminOrEditor = IsAdmin | IsEditor
+AnyRole = IsAdmin | IsEditor | IsViewer
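+# Note: combining permission classes with "|" relies on DRF's built-in support for
+# composing permissions (BasePermission supports & and | since DRF 3.9).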
+
+
+class IsStaff(permissions.BasePermission):
+ STAFF_AUTH_CLASSES = [BasicAuthentication, SessionAuthentication]
+
+ def has_permission(self, request: Request, view: ViewSet) -> bool:
+ user = request.user
+ if not any(isinstance(request._authenticator, x) for x in self.STAFF_AUTH_CLASSES):
+ return False
+ if user and user.is_authenticated:
+ return user.is_staff
+ return False
+
+ def has_object_permission(self, request: Request, view: ViewSet, obj: Any) -> bool:
+ return self.has_permission(request, view)
diff --git a/engine/apps/api/response_renderers.py b/engine/apps/api/response_renderers.py
new file mode 100644
index 0000000000..0364344b4a
--- /dev/null
+++ b/engine/apps/api/response_renderers.py
@@ -0,0 +1,13 @@
+from rest_framework import renderers
+
+
+class PlainTextRenderer(renderers.BaseRenderer):
+ media_type = "text/plain"
+
+ def render(self, data, media_type=None, renderer_context=None):
+        if isinstance(data, dict):
+            # render dicts as "key: value" lines, one per item
+            result = ""
+            for k, v in data.items():
+                result += f"{k}: {v}\n"
+            return result.encode(self.charset)
+        return data.encode(self.charset)
diff --git a/engine/apps/api/serializers/__init__.py b/engine/apps/api/serializers/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/engine/apps/api/serializers/alert.py b/engine/apps/api/serializers/alert.py
new file mode 100644
index 0000000000..98f3b64984
--- /dev/null
+++ b/engine/apps/api/serializers/alert.py
@@ -0,0 +1,22 @@
+from rest_framework import serializers
+
+from apps.alerts.incident_appearance.renderers.web_renderer import AlertWebRenderer
+from apps.alerts.models import Alert
+
+
+class AlertSerializer(serializers.ModelSerializer):
+ render_for_web = serializers.SerializerMethodField()
+
+ class Meta:
+ model = Alert
+ fields = [
+ "title",
+ "message",
+ "image_url",
+ "link_to_upstream_details",
+ "render_for_web",
+ "created_at",
+ ]
+
+ def get_render_for_web(self, obj):
+ return AlertWebRenderer(obj).render()
diff --git a/engine/apps/api/serializers/alert_group.py b/engine/apps/api/serializers/alert_group.py
new file mode 100644
index 0000000000..6ffc90d0c2
--- /dev/null
+++ b/engine/apps/api/serializers/alert_group.py
@@ -0,0 +1,195 @@
+import logging
+from datetime import datetime
+
+import humanize
+from rest_framework import serializers
+
+from apps.alerts.incident_appearance.renderers.web_renderer import AlertGroupWebRenderer
+from apps.alerts.models import AlertGroup
+from common.api_helpers.mixins import EagerLoadingMixin
+
+from .alert import AlertSerializer
+from .alert_receive_channel import FastAlertReceiveChannelSerializer
+from .user import FastUserSerializer
+
+logger = logging.getLogger(__name__)
+
+
+class ShortAlertGroupSerializer(serializers.ModelSerializer):
+ pk = serializers.CharField(read_only=True, source="public_primary_key")
+ alert_receive_channel = FastAlertReceiveChannelSerializer(source="channel")
+ render_for_web = serializers.SerializerMethodField()
+
+ class Meta:
+ model = AlertGroup
+ fields = ["pk", "render_for_web", "alert_receive_channel", "inside_organization_number"]
+
+ def get_render_for_web(self, obj):
+ return AlertGroupWebRenderer(obj).render()
+
+
+class AlertGroupSerializer(EagerLoadingMixin, serializers.ModelSerializer):
+ """
+ Attention: It's heavily cached. Make sure to invalidate alertgroup's web cache if you update the format!
+ """
+
+ pk = serializers.CharField(read_only=True, source="public_primary_key")
+ alert_receive_channel = FastAlertReceiveChannelSerializer(source="channel")
+ alerts = serializers.SerializerMethodField("get_limited_alerts")
+ resolved_by_verbose = serializers.CharField(source="get_resolved_by_display")
+ resolved_by_user = FastUserSerializer(required=False)
+ acknowledged_by_user = FastUserSerializer(required=False)
+ silenced_by_user = FastUserSerializer(required=False)
+ related_users = serializers.SerializerMethodField()
+
+ last_alert_at = serializers.SerializerMethodField()
+
+ started_at_verbose = serializers.SerializerMethodField()
+ acknowledged_at_verbose = serializers.SerializerMethodField()
+ resolved_at_verbose = serializers.SerializerMethodField()
+ silenced_at_verbose = serializers.SerializerMethodField()
+
+ dependent_alert_groups = ShortAlertGroupSerializer(many=True)
+ root_alert_group = ShortAlertGroupSerializer()
+
+ alerts_count = serializers.ReadOnlyField()
+
+ status = serializers.ReadOnlyField()
+ render_for_web = serializers.SerializerMethodField()
+
+ PREFETCH_RELATED = [
+ "alerts",
+ "dependent_alert_groups",
+ "log_records",
+ "log_records__author",
+ "log_records__escalation_policy",
+ "log_records__invitation__invitee",
+ ]
+
+ SELECT_RELATED = [
+ "slack_message",
+ "channel__organization",
+ "slack_message___slack_team_identity",
+ "acknowledged_by_user",
+ "resolved_by_user",
+ "silenced_by_user",
+ ]
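+
+    # EagerLoadingMixin is expected to apply the PREFETCH_RELATED/SELECT_RELATED lists
+    # above to the view queryset to avoid N+1 queries (see common.api_helpers.mixins).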
+
+ class Meta:
+ model = AlertGroup
+ fields = [
+ "pk",
+ "alerts_count",
+ "inside_organization_number",
+ "verbose_name",
+ "alert_receive_channel",
+ "resolved",
+ "resolved_by",
+ "resolved_by_verbose",
+ "resolved_by_user",
+ "resolved_at",
+ "acknowledged_at",
+ "acknowledged",
+ "acknowledged_on_source",
+ "acknowledged_at",
+ "acknowledged_by_user",
+ "silenced",
+ "silenced_by_user",
+ "silenced_at",
+ "silenced_at_verbose",
+ "silenced_until",
+ "started_at",
+ "last_alert_at",
+ "silenced_until",
+ "permalink",
+ "alerts",
+ "related_users",
+ "started_at_verbose",
+ "acknowledged_at_verbose",
+ "resolved_at_verbose",
+ "render_for_web",
+ "render_after_resolve_report_json",
+ "dependent_alert_groups",
+ "root_alert_group",
+ "status",
+ ]
+
+ def get_last_alert_at(self, obj):
+ last_alert = obj.alerts.last()
+ # TODO: This is a Hotfix for 0.0.27
+ if last_alert is None:
+ logger.warning(f"obj {obj} doesn't have last_alert!")
+ return ""
+ return str(last_alert.created_at)
+
+ def get_limited_alerts(self, obj):
+ """
+ Overriding default alerts because there are alert_groups with thousands of them.
+ It's just too slow, we need to cut here.
+ """
+ alerts = obj.alerts.all()[:100]
+
+        if len(alerts) > 90:  # close to the cap above, so the list was likely truncated
+ for alert in alerts:
+ alert.title = str(alert.title) + " Only last 100 alerts are shown. Use Amixr API to fetch all of them."
+
+ return AlertSerializer(alerts, many=True).data
+
+ def get_related_users(self, obj):
+ users_ids = set()
+ users = []
+
+        # add resolved_by_user and acknowledged_by_user explicitly because the log records
+        # are already prefetched by the time acknowledge/resolve are called in the view.
+ if obj.resolved_by_user:
+ users_ids.add(obj.resolved_by_user.public_primary_key)
+ users.append(obj.resolved_by_user.short())
+
+ if obj.acknowledged_by_user and obj.acknowledged_by_user.public_primary_key not in users_ids:
+ users_ids.add(obj.acknowledged_by_user.public_primary_key)
+ users.append(obj.acknowledged_by_user.short())
+
+ if obj.silenced_by_user and obj.silenced_by_user.public_primary_key not in users_ids:
+ users_ids.add(obj.silenced_by_user.public_primary_key)
+ users.append(obj.silenced_by_user.short())
+
+ for log_record in obj.log_records.all():
+ if log_record.author is not None and log_record.author.public_primary_key not in users_ids:
+ users.append(log_record.author.short())
+ users_ids.add(log_record.author.public_primary_key)
+ return users
+
+ def get_started_at_verbose(self, obj):
+ started_at_verbose = None
+ if obj.started_at is not None:
+ started_at_verbose = humanize.naturaltime(
+ datetime.now().replace(tzinfo=None) - obj.started_at.replace(tzinfo=None)
+ )
+ return started_at_verbose
+
+ def get_acknowledged_at_verbose(self, obj):
+ acknowledged_at_verbose = None
+ if obj.acknowledged_at is not None:
+ acknowledged_at_verbose = humanize.naturaltime(
+ datetime.now().replace(tzinfo=None) - obj.acknowledged_at.replace(tzinfo=None)
+ ) # TODO: Deal with timezones
+ return acknowledged_at_verbose
+
+ def get_resolved_at_verbose(self, obj):
+ resolved_at_verbose = None
+ if obj.resolved_at is not None:
+ resolved_at_verbose = humanize.naturaltime(
+ datetime.now().replace(tzinfo=None) - obj.resolved_at.replace(tzinfo=None)
+ ) # TODO: Deal with timezones
+ return resolved_at_verbose
+
+ def get_silenced_at_verbose(self, obj):
+ silenced_at_verbose = None
+ if obj.silenced_at is not None:
+ silenced_at_verbose = humanize.naturaltime(
+ datetime.now().replace(tzinfo=None) - obj.silenced_at.replace(tzinfo=None)
+ ) # TODO: Deal with timezones
+ return silenced_at_verbose
+
+ def get_render_for_web(self, obj):
+ return AlertGroupWebRenderer(obj).render()
diff --git a/engine/apps/api/serializers/alert_receive_channel.py b/engine/apps/api/serializers/alert_receive_channel.py
new file mode 100644
index 0000000000..b04f2283b1
--- /dev/null
+++ b/engine/apps/api/serializers/alert_receive_channel.py
@@ -0,0 +1,656 @@
+from collections import OrderedDict
+from collections.abc import Mapping
+
+from django.apps import apps
+from django.conf import settings
+from django.core.exceptions import ObjectDoesNotExist
+from django.core.exceptions import ValidationError as DjangoValidationError
+from django.core.validators import URLValidator
+from django.template.loader import render_to_string
+from jinja2 import TemplateSyntaxError
+from rest_framework import serializers
+from rest_framework.exceptions import ValidationError
+from rest_framework.fields import SerializerMethodField, SkipField, get_error_detail, set_value
+from rest_framework.settings import api_settings
+
+from apps.alerts.grafana_alerting_sync_manager.grafana_alerting_sync import GrafanaAlertingSyncManager
+from apps.alerts.models import AlertReceiveChannel
+from apps.base.messaging import get_messaging_backends
+from common.api_helpers.custom_fields import TeamPrimaryKeyRelatedField, WritableSerializerMethodField
+from common.api_helpers.exceptions import BadRequest
+from common.api_helpers.mixins import IMAGE_URL, TEMPLATE_NAMES_ONLY_WITH_NOTIFICATION_CHANNEL, EagerLoadingMixin
+from common.api_helpers.utils import CurrentTeamDefault
+from common.jinja_templater import jinja_template_env
+
+from .integration_heartbeat import IntegrationHeartBeatSerializer
+
+
+def valid_jinja_template_for_serializer_method_field(template):
+ for _, val in template.items():
+ try:
+ jinja_template_env.from_string(val)
+ except TemplateSyntaxError:
+ raise serializers.ValidationError("invalid template")
+
+
+class AlertReceiveChannelSerializer(EagerLoadingMixin, serializers.ModelSerializer):
+ id = serializers.CharField(read_only=True, source="public_primary_key")
+ integration_url = serializers.ReadOnlyField()
+ alert_count = serializers.SerializerMethodField()
+ alert_groups_count = serializers.SerializerMethodField()
+ author = serializers.CharField(read_only=True, source="author.public_primary_key")
+ organization = serializers.CharField(read_only=True, source="organization.public_primary_key")
+ team = TeamPrimaryKeyRelatedField(allow_null=True, default=CurrentTeamDefault())
+ is_able_to_autoresolve = serializers.ReadOnlyField()
+ default_channel_filter = serializers.SerializerMethodField()
+ instructions = serializers.SerializerMethodField()
+ demo_alert_enabled = serializers.BooleanField(source="is_demo_alert_enabled", read_only=True)
+ maintenance_till = serializers.ReadOnlyField(source="till_maintenance_timestamp")
+ heartbeat = serializers.SerializerMethodField()
+ allow_delete = serializers.SerializerMethodField()
+
+    # integration_heartbeat is in PREFETCH_RELATED intentionally:
+    # with select_related, the ORM builds an unexpected join that leads to an
+    # incorrect heartbeat <-> alert_receive_channel binding in the result
+ PREFETCH_RELATED = ["channel_filters", "integration_heartbeat"]
+ SELECT_RELATED = ["organization", "author"]
+
+ class Meta:
+ model = AlertReceiveChannel
+ fields = [
+ "id",
+ "description",
+ "integration",
+ "smile_code",
+ "verbal_name",
+ "author",
+ "organization",
+ "team",
+ "created_at",
+ "integration_url",
+ "alert_count",
+ "alert_groups_count",
+ "allow_source_based_resolving",
+ "instructions",
+ "is_able_to_autoresolve",
+ "default_channel_filter",
+ "demo_alert_enabled",
+ "maintenance_mode",
+ "maintenance_till",
+ "heartbeat",
+ "is_available_for_integration_heartbeat",
+ "allow_delete",
+ ]
+ read_only_fields = [
+ "created_at",
+ "author",
+ "organization",
+ "smile_code",
+ "integration_url",
+ "instructions",
+ "demo_alert_enabled",
+ "maintenance_mode",
+ ]
+ extra_kwargs = {"integration": {"required": True}}
+
+ def create(self, validated_data):
+ organization = self.context["request"].auth.organization
+ integration = validated_data.get("integration")
+ if integration == AlertReceiveChannel.INTEGRATION_GRAFANA_ALERTING:
+ connection_error = GrafanaAlertingSyncManager.check_for_connection_errors(organization)
+ if connection_error:
+ raise BadRequest(detail=connection_error)
+ instance = AlertReceiveChannel.create(
+ **validated_data, organization=organization, author=self.context["request"].user
+ )
+
+ return instance
+
+ def get_instructions(self, obj):
+ if obj.integration in [AlertReceiveChannel.INTEGRATION_MAINTENANCE]:
+ return ""
+
+ rendered_instruction_for_web = render_to_string(
+ AlertReceiveChannel.INTEGRATIONS_TO_INSTRUCTIONS_WEB[obj.integration], {"alert_receive_channel": obj}
+ )
+
+ return rendered_instruction_for_web
+
+    # MethodFields are used instead of the corresponding properties because those properties hit the DB for each instance in the queryset
+    def get_default_channel_filter(self, obj):
+        # avoid shadowing the builtin "filter"
+        for channel_filter in obj.channel_filters.all():
+            if channel_filter.is_default:
+                return channel_filter.public_primary_key
+
+ def validate_verbal_name(self, verbal_name):
+ organization = self.context["request"].auth.organization
+ if verbal_name is None or (self.instance and verbal_name == self.instance.verbal_name):
+ return verbal_name
+ try:
+ obj = AlertReceiveChannel.objects.get(organization=organization, verbal_name=verbal_name)
+ except AlertReceiveChannel.DoesNotExist:
+ return verbal_name
+ if self.instance and obj.id == self.instance.id:
+ return verbal_name
+ else:
+ raise serializers.ValidationError(detail="Integration with this name already exists")
+
+ def get_heartbeat(self, obj):
+ try:
+ heartbeat = obj.integration_heartbeat
+ except ObjectDoesNotExist:
+ return None
+ return IntegrationHeartBeatSerializer(heartbeat).data
+
+ def get_allow_delete(self, obj):
+ return True
+
+ def get_alert_count(self, obj):
+ return 0
+
+ def get_alert_groups_count(self, obj):
+ return 0
+
+
+class AlertReceiveChannelUpdateSerializer(AlertReceiveChannelSerializer):
+ class Meta(AlertReceiveChannelSerializer.Meta):
+ read_only_fields = [*AlertReceiveChannelSerializer.Meta.read_only_fields, "integration"]
+
+
+class FastAlertReceiveChannelSerializer(serializers.ModelSerializer):
+ id = serializers.CharField(read_only=True, source="public_primary_key")
+ integration = serializers.CharField(read_only=True)
+ deleted = serializers.SerializerMethodField()
+
+ class Meta:
+ model = AlertReceiveChannel
+ fields = ["id", "integration", "verbal_name", "deleted"]
+
+ def get_deleted(self, obj):
+ return obj.deleted_at is not None
+
+
+class FilterAlertReceiveChannelSerializer(serializers.ModelSerializer):
+ value = serializers.SerializerMethodField()
+ display_name = serializers.SerializerMethodField()
+
+ class Meta:
+ model = AlertReceiveChannel
+ fields = ["value", "display_name"]
+
+ def get_value(self, obj):
+ return obj.public_primary_key
+
+ def get_display_name(self, obj):
+ display_name = obj.verbal_name or AlertReceiveChannel.INTEGRATION_CHOICES[obj.integration][1]
+ return display_name
+
+
+class AlertReceiveChannelTemplatesSerializer(EagerLoadingMixin, serializers.ModelSerializer):
+ id = serializers.CharField(read_only=True, source="public_primary_key")
+ slack_title_template = WritableSerializerMethodField(
+ allow_null=True,
+ deserializer_field=serializers.CharField(),
+ validators=[valid_jinja_template_for_serializer_method_field],
+ required=False,
+ )
+ slack_message_template = WritableSerializerMethodField(
+ allow_null=True,
+ deserializer_field=serializers.CharField(),
+ validators=[valid_jinja_template_for_serializer_method_field],
+ required=False,
+ )
+ slack_image_url_template = WritableSerializerMethodField(
+ allow_null=True,
+ deserializer_field=serializers.CharField(),
+ validators=[valid_jinja_template_for_serializer_method_field],
+ required=False,
+ )
+ web_title_template = WritableSerializerMethodField(
+ allow_null=True,
+ deserializer_field=serializers.CharField(),
+ validators=[valid_jinja_template_for_serializer_method_field],
+ required=False,
+ )
+ web_message_template = WritableSerializerMethodField(
+ allow_null=True,
+ deserializer_field=serializers.CharField(),
+ validators=[valid_jinja_template_for_serializer_method_field],
+ required=False,
+ )
+ web_image_url_template = WritableSerializerMethodField(
+ allow_null=True,
+ deserializer_field=serializers.CharField(),
+ validators=[valid_jinja_template_for_serializer_method_field],
+ required=False,
+ )
+ sms_title_template = WritableSerializerMethodField(
+ allow_null=True,
+ deserializer_field=serializers.CharField(),
+ validators=[valid_jinja_template_for_serializer_method_field],
+ required=False,
+ )
+ phone_call_title_template = WritableSerializerMethodField(
+ allow_null=True,
+ deserializer_field=serializers.CharField(),
+ validators=[valid_jinja_template_for_serializer_method_field],
+ required=False,
+ )
+ telegram_title_template = WritableSerializerMethodField(
+ allow_null=True,
+ deserializer_field=serializers.CharField(),
+ validators=[valid_jinja_template_for_serializer_method_field],
+ required=False,
+ )
+ telegram_message_template = WritableSerializerMethodField(
+ allow_null=True,
+ deserializer_field=serializers.CharField(),
+ validators=[valid_jinja_template_for_serializer_method_field],
+ required=False,
+ )
+ telegram_image_url_template = WritableSerializerMethodField(
+ allow_null=True,
+ deserializer_field=serializers.CharField(),
+ validators=[valid_jinja_template_for_serializer_method_field],
+ required=False,
+ )
+ email_title_template = WritableSerializerMethodField(
+ allow_null=True,
+ deserializer_field=serializers.CharField(),
+ validators=[valid_jinja_template_for_serializer_method_field],
+ required=False,
+ )
+ email_message_template = WritableSerializerMethodField(
+ allow_null=True,
+ deserializer_field=serializers.CharField(),
+ validators=[valid_jinja_template_for_serializer_method_field],
+ required=False,
+ )
+ source_link_template = WritableSerializerMethodField(
+ allow_null=True,
+ deserializer_field=serializers.CharField(),
+ validators=[valid_jinja_template_for_serializer_method_field],
+ required=False,
+ )
+ grouping_id_template = WritableSerializerMethodField(
+ allow_null=True,
+ deserializer_field=serializers.CharField(),
+ validators=[valid_jinja_template_for_serializer_method_field],
+ required=False,
+ )
+ acknowledge_condition_template = WritableSerializerMethodField(
+ allow_null=True,
+ deserializer_field=serializers.CharField(),
+ validators=[valid_jinja_template_for_serializer_method_field],
+ required=False,
+ )
+ resolve_condition_template = WritableSerializerMethodField(
+ allow_null=True,
+ deserializer_field=serializers.CharField(),
+ validators=[valid_jinja_template_for_serializer_method_field],
+ required=False,
+ )
+
+ payload_example = SerializerMethodField()
+
+ class Meta:
+ model = AlertReceiveChannel
+ fields = [
+ "id",
+ "verbal_name",
+ "slack_title_template",
+ "slack_message_template",
+ "slack_image_url_template",
+ "sms_title_template",
+ "phone_call_title_template",
+ "web_title_template",
+ "web_message_template",
+ "web_image_url_template",
+ "email_title_template",
+ "email_message_template",
+ "telegram_title_template",
+ "telegram_message_template",
+ "telegram_image_url_template",
+ "source_link_template",
+ "grouping_id_template",
+ "resolve_condition_template",
+ "payload_example",
+ "acknowledge_condition_template",
+ ]
+ extra_kwargs = {"integration": {"required": True}}
+
+    # MethodFields are used instead of the corresponding properties because those properties hit the DB for each instance in the queryset
+
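+    # Setter pattern used below: persist a custom template only when it differs from the
+    # integration default; writing a value equal to the default resets the stored field to None.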
+ def get_slack_title_template(self, obj):
+ default_template = AlertReceiveChannel.INTEGRATION_TO_DEFAULT_SLACK_TITLE_TEMPLATE[obj.integration]
+ return obj.slack_title_template or default_template
+
+ def set_slack_title_template(self, value):
+ default_template = AlertReceiveChannel.INTEGRATION_TO_DEFAULT_SLACK_TITLE_TEMPLATE[self.instance.integration]
+ if default_template is None or default_template.strip() != value.strip():
+ self.instance.slack_title_template = value.strip()
+ elif default_template is not None and default_template.strip() == value.strip():
+ self.instance.slack_title_template = None
+
+ def get_slack_message_template(self, obj):
+ default_template = AlertReceiveChannel.INTEGRATION_TO_DEFAULT_SLACK_MESSAGE_TEMPLATE[obj.integration]
+ return obj.slack_message_template or default_template
+
+ def set_slack_message_template(self, value):
+ default_template = AlertReceiveChannel.INTEGRATION_TO_DEFAULT_SLACK_MESSAGE_TEMPLATE[self.instance.integration]
+ if default_template is None or default_template.strip() != value.strip():
+ self.instance.slack_message_template = value.strip()
+ elif default_template is not None and default_template.strip() == value.strip():
+ self.instance.slack_message_template = None
+
+ def get_slack_image_url_template(self, obj):
+ default_template = AlertReceiveChannel.INTEGRATION_TO_DEFAULT_SLACK_IMAGE_URL_TEMPLATE[obj.integration]
+ return obj.slack_image_url_template or default_template
+
+ def set_slack_image_url_template(self, value):
+ default_template = AlertReceiveChannel.INTEGRATION_TO_DEFAULT_SLACK_IMAGE_URL_TEMPLATE[
+ self.instance.integration
+ ]
+ if default_template is None or default_template.strip() != value.strip():
+ self.instance.slack_image_url_template = value.strip()
+ elif default_template is not None and default_template.strip() == value.strip():
+ self.instance.slack_image_url_template = None
+
+ def get_sms_title_template(self, obj):
+ default_template = AlertReceiveChannel.INTEGRATION_TO_DEFAULT_SMS_TITLE_TEMPLATE[obj.integration]
+ return obj.sms_title_template or default_template
+
+ def set_sms_title_template(self, value):
+ default_template = AlertReceiveChannel.INTEGRATION_TO_DEFAULT_SMS_TITLE_TEMPLATE[self.instance.integration]
+ if default_template is None or default_template.strip() != value.strip():
+ self.instance.sms_title_template = value.strip()
+ elif default_template is not None and default_template.strip() == value.strip():
+ self.instance.sms_title_template = None
+
+ def get_phone_call_title_template(self, obj):
+ default_template = AlertReceiveChannel.INTEGRATION_TO_DEFAULT_PHONE_CALL_TITLE_TEMPLATE[obj.integration]
+ return obj.phone_call_title_template or default_template
+
+ def set_phone_call_title_template(self, value):
+ default_template = AlertReceiveChannel.INTEGRATION_TO_DEFAULT_PHONE_CALL_TITLE_TEMPLATE[
+ self.instance.integration
+ ]
+ if default_template is None or default_template.strip() != value.strip():
+ self.instance.phone_call_title_template = value.strip()
+ elif default_template is not None and default_template.strip() == value.strip():
+ self.instance.phone_call_title_template = None
+
+ def get_web_title_template(self, obj):
+ default_template = AlertReceiveChannel.INTEGRATION_TO_DEFAULT_WEB_TITLE_TEMPLATE[obj.integration]
+ return obj.web_title_template or default_template
+
+ def set_web_title_template(self, value):
+ default_template = AlertReceiveChannel.INTEGRATION_TO_DEFAULT_WEB_TITLE_TEMPLATE[self.instance.integration]
+ if default_template is None or default_template.strip() != value.strip():
+ self.instance.web_title_template = value.strip()
+ elif default_template is not None and default_template.strip() == value.strip():
+ self.instance.web_title_template = None
+
+ def get_email_title_template(self, obj):
+ default_template = AlertReceiveChannel.INTEGRATION_TO_DEFAULT_EMAIL_TITLE_TEMPLATE[obj.integration]
+ return obj.email_title_template or default_template
+
+ def set_email_title_template(self, value):
+ default_template = AlertReceiveChannel.INTEGRATION_TO_DEFAULT_EMAIL_TITLE_TEMPLATE[self.instance.integration]
+ if default_template is None or default_template.strip() != value.strip():
+ self.instance.email_title_template = value.strip()
+ elif default_template is not None and default_template.strip() == value.strip():
+ self.instance.email_title_template = None
+
+ def get_web_message_template(self, obj):
+ default_template = AlertReceiveChannel.INTEGRATION_TO_DEFAULT_WEB_MESSAGE_TEMPLATE[obj.integration]
+ return obj.web_message_template or default_template
+
+ def set_web_message_template(self, value):
+ default_template = AlertReceiveChannel.INTEGRATION_TO_DEFAULT_WEB_MESSAGE_TEMPLATE[self.instance.integration]
+ if default_template is None or default_template.strip() != value.strip():
+ self.instance.web_message_template = value.strip()
+ elif default_template is not None and default_template.strip() == value.strip():
+ self.instance.web_message_template = None
+
+ def get_web_image_url_template(self, obj):
+ default_template = AlertReceiveChannel.INTEGRATION_TO_DEFAULT_WEB_IMAGE_URL_TEMPLATE[obj.integration]
+ return obj.web_image_url_template or default_template
+
+ def set_web_image_url_template(self, value):
+ default_template = AlertReceiveChannel.INTEGRATION_TO_DEFAULT_WEB_IMAGE_URL_TEMPLATE[self.instance.integration]
+ if default_template is None or default_template.strip() != value.strip():
+ self.instance.web_image_url_template = value.strip()
+ elif default_template is not None and default_template.strip() == value.strip():
+ self.instance.web_image_url_template = None
+
+ def get_email_message_template(self, obj):
+ default_template = AlertReceiveChannel.INTEGRATION_TO_DEFAULT_EMAIL_MESSAGE_TEMPLATE[obj.integration]
+ return obj.email_message_template or default_template
+
+ def set_email_message_template(self, value):
+ default_template = AlertReceiveChannel.INTEGRATION_TO_DEFAULT_EMAIL_MESSAGE_TEMPLATE[self.instance.integration]
+ if default_template is None or default_template.strip() != value.strip():
+ self.instance.email_message_template = value.strip()
+ elif default_template is not None and default_template.strip() == value.strip():
+ self.instance.email_message_template = None
+
+ def get_telegram_title_template(self, obj):
+ default_template = AlertReceiveChannel.INTEGRATION_TO_DEFAULT_TELEGRAM_TITLE_TEMPLATE[obj.integration]
+ return obj.telegram_title_template or default_template
+
+ def set_telegram_title_template(self, value):
+ default_template = AlertReceiveChannel.INTEGRATION_TO_DEFAULT_TELEGRAM_TITLE_TEMPLATE[self.instance.integration]
+ if default_template is None or default_template.strip() != value.strip():
+ self.instance.telegram_title_template = value.strip()
+ elif default_template is not None and default_template.strip() == value.strip():
+ self.instance.telegram_title_template = None
+
+ def get_telegram_message_template(self, obj):
+ default_template = AlertReceiveChannel.INTEGRATION_TO_DEFAULT_TELEGRAM_MESSAGE_TEMPLATE[obj.integration]
+ return obj.telegram_message_template or default_template
+
+ def set_telegram_message_template(self, value):
+ default_template = AlertReceiveChannel.INTEGRATION_TO_DEFAULT_TELEGRAM_MESSAGE_TEMPLATE[
+ self.instance.integration
+ ]
+ if default_template is None or default_template.strip() != value.strip():
+ self.instance.telegram_message_template = value.strip()
+ elif default_template is not None and default_template.strip() == value.strip():
+ self.instance.telegram_message_template = None
+
+ def get_telegram_image_url_template(self, obj):
+ default_template = AlertReceiveChannel.INTEGRATION_TO_DEFAULT_TELEGRAM_IMAGE_URL_TEMPLATE[obj.integration]
+ return obj.telegram_image_url_template or default_template
+
+ def set_telegram_image_url_template(self, value):
+ default_template = AlertReceiveChannel.INTEGRATION_TO_DEFAULT_TELEGRAM_IMAGE_URL_TEMPLATE[
+ self.instance.integration
+ ]
+ if default_template is None or default_template.strip() != value.strip():
+ self.instance.telegram_image_url_template = value.strip()
+ elif default_template is not None and default_template.strip() == value.strip():
+ self.instance.telegram_image_url_template = None
+
+ def get_source_link_template(self, obj):
+ default_template = AlertReceiveChannel.INTEGRATION_TO_DEFAULT_SOURCE_LINK_TEMPLATE[obj.integration]
+ return obj.source_link_template or default_template
+
+ def set_source_link_template(self, value):
+ default_template = AlertReceiveChannel.INTEGRATION_TO_DEFAULT_SOURCE_LINK_TEMPLATE[self.instance.integration]
+        if default_template is None or default_template.strip() != value.strip():
+            self.instance.source_link_template = value.strip()
+        elif default_template is not None and default_template.strip() == value.strip():
+            self.instance.source_link_template = None
+
+ def get_grouping_id_template(self, obj):
+ default_template = AlertReceiveChannel.INTEGRATION_TO_DEFAULT_GROUPING_ID_TEMPLATE[obj.integration]
+ return obj.grouping_id_template or default_template
+
+ def set_grouping_id_template(self, value):
+ default_template = AlertReceiveChannel.INTEGRATION_TO_DEFAULT_GROUPING_ID_TEMPLATE[self.instance.integration]
+ if default_template is None or default_template.strip() != value.strip():
+ self.instance.grouping_id_template = value.strip()
+ elif default_template is not None and default_template.strip() == value.strip():
+ self.instance.grouping_id_template = None
+
+ def get_acknowledge_condition_template(self, obj):
+ default_template = AlertReceiveChannel.INTEGRATION_TO_DEFAULT_ACKNOWLEDGE_CONDITION_TEMPLATE[obj.integration]
+ return obj.acknowledge_condition_template or default_template
+
+ def set_acknowledge_condition_template(self, value):
+ default_template = AlertReceiveChannel.INTEGRATION_TO_DEFAULT_ACKNOWLEDGE_CONDITION_TEMPLATE[
+ self.instance.integration
+ ]
+ if default_template is None or default_template.strip() != value.strip():
+ self.instance.acknowledge_condition_template = value.strip()
+ elif default_template is not None and default_template.strip() == value.strip():
+ self.instance.acknowledge_condition_template = None
+
+ def get_resolve_condition_template(self, obj):
+ default_template = AlertReceiveChannel.INTEGRATION_TO_DEFAULT_RESOLVE_CONDITION_TEMPLATE[obj.integration]
+ return obj.resolve_condition_template or default_template
+
+ def set_resolve_condition_template(self, value):
+ default_template = AlertReceiveChannel.INTEGRATION_TO_DEFAULT_RESOLVE_CONDITION_TEMPLATE[
+ self.instance.integration
+ ]
+ if default_template is None or default_template.strip() != value.strip():
+ self.instance.resolve_condition_template = value.strip()
+ elif default_template is not None and default_template.strip() == value.strip():
+ self.instance.resolve_condition_template = None
+
+ def get_payload_example(self, obj):
+ AlertGroup = apps.get_model("alerts", "AlertGroup")
+ if "alert_group_id" in self.context["request"].query_params:
+ alert_group_id = self.context["request"].query_params.get("alert_group_id")
+ try:
+ return obj.alert_groups.get(public_primary_key=alert_group_id).alerts.first().raw_request_data
+ except AlertGroup.DoesNotExist:
+ raise serializers.ValidationError("Alert group doesn't exist for this integration")
+ except AttributeError:
+ raise serializers.ValidationError("Unable to retrieve example payload for this alert group")
+ else:
+ try:
+ return obj.alert_groups.last().alerts.first().raw_request_data
+ except AttributeError:
+ return None
+
+    # Override method to pass field_name directly to set_value to handle None values for WritableSerializerMethodField
+ def to_internal_value(self, data):
+ """
+ Dict of native values <- Dict of primitive datatypes.
+ """
+ if not isinstance(data, Mapping):
+ message = self.error_messages["invalid"].format(datatype=type(data).__name__)
+ raise ValidationError({api_settings.NON_FIELD_ERRORS_KEY: [message]}, code="invalid")
+
+ ret = OrderedDict()
+ errors = OrderedDict()
+ fields = self._writable_fields
+
+ for field in fields:
+ validate_method = getattr(self, "validate_" + field.field_name, None)
+ primitive_value = field.get_value(data)
+ try:
+ validated_value = field.run_validation(primitive_value)
+ if validate_method is not None:
+ validated_value = validate_method(validated_value)
+ except ValidationError as exc:
+ errors[field.field_name] = exc.detail
+ except DjangoValidationError as exc:
+ errors[field.field_name] = get_error_detail(exc)
+ except SkipField:
+ pass
+ else:
+                # This branch is the reason the method is overridden
+ if validated_value is None and isinstance(field, WritableSerializerMethodField):
+ set_value(ret, [field.field_name], validated_value)
+ else:
+ set_value(ret, field.source_attrs, validated_value)
+
+ # handle updates for messaging backend templates
+ messaging_backend_errors = self._handle_messaging_backend_updates(data, ret)
+ errors.update(messaging_backend_errors)
+
+ if errors:
+ raise ValidationError(errors)
+
+ return ret
+
+ def _handle_messaging_backend_updates(self, data, ret):
+ """Update additional messaging backend templates if needed."""
+ errors = {}
+ for backend_id, _ in get_messaging_backends():
+ # fetch existing templates if any
+ backend_templates = {}
+ if self.instance.messaging_backends_templates is not None:
+ backend_templates = self.instance.messaging_backends_templates.get(backend_id, {})
+ # validate updated templates if any
+ backend_updates = {}
+ for field in TEMPLATE_NAMES_ONLY_WITH_NOTIFICATION_CHANNEL:
+ field_name = f"{backend_id.lower()}_{field}_template"
+ value = data.get(field_name)
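+            # IMAGE_URL fields are validated as URLs; all other templates are validated as Jinja templates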
+ validator = jinja_template_env.from_string if field != IMAGE_URL else URLValidator()
+ if value is not None:
+ try:
+ if value:
+ validator(value)
+ except TemplateSyntaxError:
+ errors[field_name] = "invalid template"
+ except DjangoValidationError:
+ errors[field_name] = "invalid URL"
+ else:
+ backend_updates[field] = value
+ # update backend templates
+ backend_templates.update(backend_updates)
+ set_value(ret, ["messaging_backends_templates", backend_id], backend_templates)
+
+ return errors
+
+ def to_representation(self, obj):
+ ret = super().to_representation(obj)
+ ret = self._get_templates_to_show(ret)
+
+ # include messaging backend templates
+ additional_templates = self._get_messaging_backend_templates(obj)
+ ret.update(additional_templates)
+
+ return ret
+
+ def _get_templates_to_show(self, response_data):
+ """
+        For on-prem installations with disabled features, hide the corresponding templates from the response.
+ """
+ slack_integration_required_templates = [
+ "slack_title_template",
+ "slack_message_template",
+ "slack_image_url_template",
+ ]
+ telegram_integration_required_templates = [
+ "telegram_title_template",
+ "telegram_message_template",
+ "telegram_image_url_template",
+ ]
+ if not settings.FEATURE_SLACK_INTEGRATION_ENABLED:
+ for st in slack_integration_required_templates:
+ response_data.pop(st)
+ if not settings.FEATURE_TELEGRAM_INTEGRATION_ENABLED:
+ for tt in telegram_integration_required_templates:
+ response_data.pop(tt)
+
+ return response_data
+
+ def _get_messaging_backend_templates(self, obj):
+ """Return additional messaging backend templates if any."""
+ templates = {}
+ for backend_id, _ in get_messaging_backends():
+ for field in ("title", "message", "image_url"):
+ value = None
+ if obj.messaging_backends_templates:
+ value = obj.messaging_backends_templates.get(backend_id, {}).get(field)
+ if value is None:
+ value = obj.get_default_template_attribute(backend_id, field)
+ field_name = f"{backend_id.lower()}_{field}_template"
+ templates[field_name] = value
+ return templates
diff --git a/engine/apps/api/serializers/channel_filter.py b/engine/apps/api/serializers/channel_filter.py
new file mode 100644
index 0000000000..89a84a4a0c
--- /dev/null
+++ b/engine/apps/api/serializers/channel_filter.py
@@ -0,0 +1,186 @@
+from django.apps import apps
+from rest_framework import serializers
+
+from apps.alerts.models import AlertReceiveChannel, ChannelFilter, EscalationChain
+from apps.base.messaging import get_messaging_backend_from_id
+from apps.telegram.models import TelegramToOrganizationConnector
+from common.api_helpers.custom_fields import OrganizationFilteredPrimaryKeyRelatedField
+from common.api_helpers.exceptions import BadRequest
+from common.api_helpers.mixins import EagerLoadingMixin, OrderedModelSerializerMixin
+from common.utils import is_regex_valid
+
+
+class ChannelFilterSerializer(OrderedModelSerializerMixin, EagerLoadingMixin, serializers.ModelSerializer):
+ id = serializers.CharField(read_only=True, source="public_primary_key")
+ alert_receive_channel = OrganizationFilteredPrimaryKeyRelatedField(queryset=AlertReceiveChannel.objects)
+ escalation_chain = OrganizationFilteredPrimaryKeyRelatedField(
+ queryset=EscalationChain.objects,
+ filter_field="organization",
+ allow_null=True,
+ required=False,
+ )
+ slack_channel = serializers.SerializerMethodField()
+ telegram_channel = OrganizationFilteredPrimaryKeyRelatedField(
+ queryset=TelegramToOrganizationConnector.objects, filter_field="organization", allow_null=True, required=False
+ )
+ order = serializers.IntegerField(required=False)
+
+ SELECT_RELATED = ["escalation_chain", "alert_receive_channel"]
+
+ class Meta:
+ model = ChannelFilter
+ fields = [
+ "id",
+ "order",
+ "alert_receive_channel",
+ "escalation_chain",
+ "slack_channel",
+ "created_at",
+ "filtering_term",
+ "telegram_channel",
+ "is_default",
+ "notify_in_slack",
+ "notify_in_telegram",
+ "notification_backends",
+ ]
+ read_only_fields = ["created_at", "is_default"]
+ extra_kwargs = {"filtering_term": {"required": True, "allow_null": False}}
+
+ def get_slack_channel(self, obj):
+ if obj.slack_channel_id is None:
+ return None
+        # display_name and id appear via annotate in ChannelFilterView.get_queryset()
+ return {
+ "display_name": obj.slack_channel_name,
+ "slack_id": obj.slack_channel_id,
+ "id": obj.slack_channel_pk,
+ }
+
+ def validate(self, attrs):
+ alert_receive_channel = attrs.get("alert_receive_channel") or self.instance.alert_receive_channel
+ filtering_term = attrs.get("filtering_term")
+ if filtering_term is None:
+ return attrs
+ try:
+ obj = ChannelFilter.objects.get(alert_receive_channel=alert_receive_channel, filtering_term=filtering_term)
+ except ChannelFilter.DoesNotExist:
+ return attrs
+ if self.instance and obj.id == self.instance.id:
+ return attrs
+ else:
+ raise serializers.ValidationError(
+ {"filtering_term": ["Channel filter with this filtering term already exists"]}
+ )
+
+ def validate_slack_channel(self, slack_channel_id):
+ SlackChannel = apps.get_model("slack", "SlackChannel")
+
+ if slack_channel_id is not None:
+ slack_channel_id = slack_channel_id.upper()
+ organization = self.context["request"].auth.organization
+ try:
+ organization.slack_team_identity.get_cached_channels().get(slack_id=slack_channel_id)
+ except SlackChannel.DoesNotExist:
+ raise serializers.ValidationError(["Slack channel does not exist"])
+ return slack_channel_id
+
+ def validate_filtering_term(self, filtering_term):
+ if filtering_term is not None:
+ if not is_regex_valid(filtering_term):
+ raise serializers.ValidationError(["Filtering term is incorrect"])
+ return filtering_term
+
+ def validate_notification_backends(self, notification_backends):
+ # NOTE: updates the whole field, handling dict updates per backend
+ if notification_backends is not None:
+ if not isinstance(notification_backends, dict):
+ raise serializers.ValidationError(["Invalid messaging backend data"])
+ current = self.instance.notification_backends or {}
+ for backend_id in notification_backends:
+ backend = get_messaging_backend_from_id(backend_id)
+ if backend is None:
+ raise serializers.ValidationError(["Invalid messaging backend"])
+ updated_data = backend.validate_channel_filter_data(
+ self.instance,
+ notification_backends[backend_id],
+ )
+                # merge into any existing backend data; dict union keeps existing keys unless overridden
+ notification_backends[backend_id] = current.get(backend_id, {}) | updated_data
+ return notification_backends
+
+
+class ChannelFilterCreateSerializer(ChannelFilterSerializer):
+ alert_receive_channel = OrganizationFilteredPrimaryKeyRelatedField(queryset=AlertReceiveChannel.objects)
+ slack_channel = serializers.CharField(allow_null=True, required=False, source="slack_channel_id")
+
+ class Meta:
+ model = ChannelFilter
+ fields = [
+ "id",
+ "order",
+ "alert_receive_channel",
+ "escalation_chain",
+ "slack_channel",
+ "created_at",
+ "filtering_term",
+ "telegram_channel",
+ "is_default",
+ "notify_in_slack",
+ "notify_in_telegram",
+ "notification_backends",
+ ]
+ read_only_fields = ["created_at", "is_default"]
+ extra_kwargs = {"filtering_term": {"required": True, "allow_null": False}}
+
+ def to_representation(self, obj):
+ """add correct slack channel data to result after instance creation/update"""
+ result = super().to_representation(obj)
+ if obj.slack_channel_id is None:
+ result["slack_channel"] = None
+ else:
+ slack_team_identity = self.context["request"].auth.organization.slack_team_identity
+ if slack_team_identity is not None:
+ slack_channel = slack_team_identity.get_cached_channels(slack_id=obj.slack_channel_id).first()
+ if slack_channel:
+ result["slack_channel"] = {
+ "display_name": slack_channel.name,
+ "slack_id": obj.slack_channel_id,
+ "id": slack_channel.public_primary_key,
+ }
+ return result
+
+ def create(self, validated_data):
+ order = validated_data.pop("order", None)
+ if order is not None:
+ alert_receive_channel_id = validated_data.get("alert_receive_channel")
+ self._validate_order(order, {"alert_receive_channel_id": alert_receive_channel_id, "is_default": False})
+ instance = super().create(validated_data)
+ self._change_position(order, instance)
+ else:
+ instance = super().create(validated_data)
+ return instance
+
+
+class ChannelFilterUpdateSerializer(ChannelFilterCreateSerializer):
+ alert_receive_channel = OrganizationFilteredPrimaryKeyRelatedField(read_only=True)
+
+ class Meta(ChannelFilterCreateSerializer.Meta):
+ read_only_fields = [*ChannelFilterCreateSerializer.Meta.read_only_fields, "alert_receive_channel"]
+ extra_kwargs = {"filtering_term": {"required": False}}
+
+ def update(self, instance, validated_data):
+ order = validated_data.get("order")
+ filtering_term = validated_data.get("filtering_term")
+
+ if instance.is_default and order is not None and instance.order != order:
+ raise BadRequest(detail="The order of default channel filter cannot be changed")
+
+ if instance.is_default and filtering_term is not None:
+ raise BadRequest(detail="Filtering term of default channel filter cannot be changed")
+
+ if order is not None:
+ self._validate_order(
+ order, {"alert_receive_channel_id": instance.alert_receive_channel_id, "is_default": False}
+ )
+ self._change_position(order, instance)
+ return super().update(instance, validated_data)
diff --git a/engine/apps/api/serializers/custom_button.py b/engine/apps/api/serializers/custom_button.py
new file mode 100644
index 0000000000..11e184e7e3
--- /dev/null
+++ b/engine/apps/api/serializers/custom_button.py
@@ -0,0 +1,68 @@
+import json
+
+from django.core.validators import URLValidator, ValidationError
+from jinja2 import Template, TemplateError
+from rest_framework import serializers
+from rest_framework.validators import UniqueTogetherValidator
+
+from apps.alerts.models import CustomButton
+from common.api_helpers.custom_fields import TeamPrimaryKeyRelatedField
+from common.api_helpers.utils import CurrentOrganizationDefault, CurrentTeamDefault
+
+
+class CustomButtonSerializer(serializers.ModelSerializer):
+ id = serializers.CharField(read_only=True, source="public_primary_key")
+ organization = serializers.HiddenField(default=CurrentOrganizationDefault())
+ team = TeamPrimaryKeyRelatedField(allow_null=True, default=CurrentTeamDefault())
+ forward_whole_payload = serializers.BooleanField(allow_null=True, required=False)
+
+ class Meta:
+ model = CustomButton
+ fields = [
+ "id",
+ "name",
+ "team",
+ "webhook",
+ "data",
+ "user",
+ "password",
+ "authorization_header",
+ "organization",
+ "forward_whole_payload",
+ ]
+ extra_kwargs = {
+ "name": {"required": True, "allow_null": False, "allow_blank": False},
+ "webhook": {"required": True, "allow_null": False, "allow_blank": False},
+ }
+
+ validators = [UniqueTogetherValidator(queryset=CustomButton.objects.all(), fields=["name", "organization"])]
+
+ def validate_webhook(self, webhook):
+ if webhook:
+ try:
+ URLValidator()(webhook)
+ except ValidationError:
+ raise serializers.ValidationError("Webhook is incorrect")
+ return webhook
+ return None
+
+ def validate_data(self, data):
+ if not data:
+ return None
+
+ try:
+ json.loads(data)
+ except ValueError:
+ raise serializers.ValidationError("Data has incorrect format")
+
+ try:
+ Template(data)
+ except TemplateError:
+ raise serializers.ValidationError("Data has incorrect template")
+
+ return data
+
+ def validate_forward_whole_payload(self, data):
+ if data is None:
+ return False
+ return data
diff --git a/engine/apps/api/serializers/custom_serializers.py b/engine/apps/api/serializers/custom_serializers.py
new file mode 100644
index 0000000000..fdd1739ce1
--- /dev/null
+++ b/engine/apps/api/serializers/custom_serializers.py
@@ -0,0 +1,14 @@
+from rest_framework import serializers
+
+
+class DynamicFieldsModelSerializer(serializers.ModelSerializer):
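+    """
+    ModelSerializer accepting an optional `fields` kwarg that restricts which of the
+    declared fields are serialized, e.g. UserSerializer(user, fields=("pk", "username")).
+    """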
+ def __init__(self, *args, **kwargs):
+ fields = kwargs.pop("fields", None)
+
+ super(DynamicFieldsModelSerializer, self).__init__(*args, **kwargs)
+
+ if fields is not None:
+ allowed = set(fields)
+ existing = set(self.fields)
+ for field_name in existing - allowed:
+ self.fields.pop(field_name)
diff --git a/engine/apps/api/serializers/escalation_chain.py b/engine/apps/api/serializers/escalation_chain.py
new file mode 100644
index 0000000000..d856bcb004
--- /dev/null
+++ b/engine/apps/api/serializers/escalation_chain.py
@@ -0,0 +1,31 @@
+from rest_framework import serializers
+
+from apps.alerts.models import EscalationChain
+from common.api_helpers.custom_fields import TeamPrimaryKeyRelatedField
+from common.api_helpers.utils import CurrentOrganizationDefault, CurrentTeamDefault
+
+
+class EscalationChainSerializer(serializers.ModelSerializer):
+ id = serializers.CharField(read_only=True, source="public_primary_key")
+ organization = serializers.HiddenField(default=CurrentOrganizationDefault())
+ team = TeamPrimaryKeyRelatedField(allow_null=True, default=CurrentTeamDefault())
+
+ class Meta:
+ model = EscalationChain
+ fields = ("id", "name", "organization", "team")
+
+
+class EscalationChainListSerializer(EscalationChainSerializer):
+ number_of_integrations = serializers.SerializerMethodField()
+ number_of_routes = serializers.SerializerMethodField()
+
+ class Meta(EscalationChainSerializer.Meta):
+ fields = [*EscalationChainSerializer.Meta.fields, "number_of_integrations", "number_of_routes"]
+
+ def get_number_of_integrations(self, obj):
+        # num_integrations is added to the queryset via annotate; see EscalationChainViewSet.get_queryset
+ return getattr(obj, "num_integrations")
+
+ def get_number_of_routes(self, obj):
+        # num_routes is added to the queryset via annotate; see EscalationChainViewSet.get_queryset
+ return getattr(obj, "num_routes")
diff --git a/engine/apps/api/serializers/escalation_policy.py b/engine/apps/api/serializers/escalation_policy.py
new file mode 100644
index 0000000000..5d85e5343d
--- /dev/null
+++ b/engine/apps/api/serializers/escalation_policy.py
@@ -0,0 +1,231 @@
+import time
+from datetime import timedelta
+
+from rest_framework import serializers
+
+from apps.alerts.models import CustomButton, EscalationChain, EscalationPolicy
+from apps.schedules.models import OnCallSchedule
+from apps.slack.models import SlackUserGroup
+from apps.user_management.models import User
+from common.api_helpers.custom_fields import (
+ OrganizationFilteredPrimaryKeyRelatedField,
+ UsersFilteredByOrganizationField,
+)
+from common.api_helpers.mixins import EagerLoadingMixin
+
+WAIT_DELAY = "wait_delay"
+NOTIFY_SCHEDULE = "notify_schedule"
+NOTIFY_TO_USERS_QUEUE = "notify_to_users_queue"
+NOTIFY_GROUP = "notify_to_group"
+FROM_TIME = "from_time"
+TO_TIME = "to_time"
+NUM_ALERTS_IN_WINDOW = "num_alerts_in_window"
+NUM_MINUTES_IN_WINDOW = "num_minutes_in_window"
+CUSTOM_BUTTON_TRIGGER = "custom_button_trigger"
+
+STEP_TYPE_TO_RELATED_FIELD_MAP = {
+ EscalationPolicy.STEP_WAIT: [WAIT_DELAY],
+ EscalationPolicy.STEP_NOTIFY_SCHEDULE: [NOTIFY_SCHEDULE],
+ EscalationPolicy.STEP_NOTIFY_USERS_QUEUE: [NOTIFY_TO_USERS_QUEUE],
+ EscalationPolicy.STEP_NOTIFY_MULTIPLE_USERS: [NOTIFY_TO_USERS_QUEUE],
+ EscalationPolicy.STEP_NOTIFY_GROUP: [NOTIFY_GROUP],
+ EscalationPolicy.STEP_NOTIFY_IF_TIME: [FROM_TIME, TO_TIME],
+ EscalationPolicy.STEP_NOTIFY_IF_NUM_ALERTS_IN_TIME_WINDOW: [NUM_ALERTS_IN_WINDOW, NUM_MINUTES_IN_WINDOW],
+ EscalationPolicy.STEP_TRIGGER_CUSTOM_BUTTON: [CUSTOM_BUTTON_TRIGGER],
+}
+
+
+class EscalationPolicySerializer(EagerLoadingMixin, serializers.ModelSerializer):
+ id = serializers.CharField(read_only=True, source="public_primary_key")
+ escalation_chain = OrganizationFilteredPrimaryKeyRelatedField(queryset=EscalationChain.objects)
+ important = serializers.BooleanField(required=False)
+
+ notify_to_users_queue = UsersFilteredByOrganizationField(
+ queryset=User.objects,
+ required=False,
+ )
+ wait_delay = serializers.ChoiceField(
+ required=False,
+ choices=EscalationPolicy.WEB_DURATION_CHOICES,
+ allow_null=True,
+ )
+ num_minutes_in_window = serializers.ChoiceField(
+ required=False,
+ choices=EscalationPolicy.WEB_DURATION_CHOICES_MINUTES,
+ allow_null=True,
+ )
+ notify_schedule = OrganizationFilteredPrimaryKeyRelatedField(
+ queryset=OnCallSchedule.objects,
+ required=False,
+ allow_null=True,
+ )
+ notify_to_group = OrganizationFilteredPrimaryKeyRelatedField(
+ queryset=SlackUserGroup.objects,
+ required=False,
+ allow_null=True,
+ filter_field="slack_team_identity__organizations",
+ )
+ custom_button_trigger = OrganizationFilteredPrimaryKeyRelatedField(
+ queryset=CustomButton.objects,
+ required=False,
+ allow_null=True,
+ filter_field="organization",
+ )
+
+ class Meta:
+ model = EscalationPolicy
+ fields = [
+ "id",
+ "order",
+ "step",
+ "wait_delay",
+ "escalation_chain",
+ "notify_to_users_queue",
+ "from_time",
+ "to_time",
+ "num_alerts_in_window",
+ "num_minutes_in_window",
+ "slack_integration_required",
+ "custom_button_trigger",
+ "notify_schedule",
+ "notify_to_group",
+ "important",
+ ]
+ read_only_fields = ("order",)
+
+ SELECT_RELATED = ["escalation_chain", "notify_schedule", "notify_to_group", "custom_button_trigger"]
+ PREFETCH_RELATED = ["notify_to_users_queue"]
+
+ def validate(self, data):
+
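+        # Each step type allows only its related fields; any other step-specific field must be empty.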
+ fields_to_check = [
+ WAIT_DELAY,
+ NOTIFY_SCHEDULE,
+ NOTIFY_TO_USERS_QUEUE,
+ NOTIFY_GROUP,
+ FROM_TIME,
+ TO_TIME,
+ NUM_ALERTS_IN_WINDOW,
+ NUM_MINUTES_IN_WINDOW,
+ CUSTOM_BUTTON_TRIGGER,
+ ]
+
+ step = data.get("step")
+ if step is None:
+ raise serializers.ValidationError({"step": "This field is required."})
+
+ if data.get("important") and step in EscalationPolicy.STEPS_WITH_NO_IMPORTANT_VERSION_SET:
+ raise serializers.ValidationError(f"Step {step} can't be important")
+
+ for f in STEP_TYPE_TO_RELATED_FIELD_MAP.get(step, []):
+ fields_to_check.remove(f)
+
+ for field in fields_to_check:
+ if field == NOTIFY_TO_USERS_QUEUE:
+                # notify_to_users_queue is a many-to-many relation, so an empty list is used instead of None
+                if len(data.get(field, [])) != 0:
+                    raise serializers.ValidationError(f"Invalid combination of step {step} and {field}")
+            else:
+                if data.get(field, None) is not None:
+                    raise serializers.ValidationError(f"Invalid combination of step {step} and {field}")
+ return data
+
+ def validate_step(self, step_type):
+ organization = self.context["request"].user.organization
+ if step_type not in EscalationPolicy.INTERNAL_API_STEPS:
+ raise serializers.ValidationError("Invalid step value")
+ if step_type in EscalationPolicy.SLACK_INTEGRATION_REQUIRED_STEPS and organization.slack_team_identity is None:
+ raise serializers.ValidationError("Invalid escalation step type: step is Slack-specific")
+ return step_type
+
+ def to_internal_value(self, data):
+ data = self._wait_delay_to_internal_value(data)
+ return super().to_internal_value(data)
+
+ def to_representation(self, instance):
+ step = instance.step
+ result = super().to_representation(instance)
+ result = EscalationPolicySerializer._get_important_field(step, result)
+ return result
+
+ @staticmethod
+ def _wait_delay_to_internal_value(data):
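+        # Accept wait_delay as either "HH:MM:SS" or a number of seconds; normalize to "H:MM:SS"
+        # (e.g. 300 -> "0:05:00").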
+ if data.get(WAIT_DELAY, None):
+ try:
+ time.strptime(data[WAIT_DELAY], "%H:%M:%S")
+ except ValueError:
+ try:
+ data[WAIT_DELAY] = str(timedelta(seconds=float(data[WAIT_DELAY])))
+ except ValueError:
+ raise serializers.ValidationError("Invalid wait delay format")
+
+ return data
+
+ @staticmethod
+ def _get_important_field(step, result):
+ if step in {*EscalationPolicy.DEFAULT_STEPS_SET, *EscalationPolicy.STEPS_WITH_NO_IMPORTANT_VERSION_SET}:
+ result["important"] = False
+ elif step in EscalationPolicy.IMPORTANT_STEPS_SET:
+ result["important"] = True
+ result["step"] = EscalationPolicy.IMPORTANT_TO_DEFAULT_STEP_MAPPING[step]
+ return result
+
+ @staticmethod
+ def _convert_to_important_step_if_needed(validated_data):
+ step = validated_data.get("step")
+ important = validated_data.pop("important", None)
+
+ if step in EscalationPolicy.DEFAULT_STEPS_SET and important:
+ validated_data["step"] = EscalationPolicy.DEFAULT_TO_IMPORTANT_STEP_MAPPING[step]
+
+ return validated_data
+
+
+class EscalationPolicyCreateSerializer(EscalationPolicySerializer):
+ class Meta(EscalationPolicySerializer.Meta):
+ read_only_fields = ("order",)
+ extra_kwargs = {"escalation_chain": {"required": True, "allow_null": False}}
+
+ def create(self, validated_data):
+ validated_data = EscalationPolicyCreateSerializer._convert_to_important_step_if_needed(validated_data)
+ instance = super().create(validated_data)
+ return instance
+
+
+class EscalationPolicyUpdateSerializer(EscalationPolicySerializer):
+ escalation_chain = serializers.CharField(read_only=True, source="escalation_chain.public_primary_key")
+
+ class Meta(EscalationPolicySerializer.Meta):
+ read_only_fields = ("order", "escalation_chain")
+
+ def update(self, instance, validated_data):
+ step = validated_data.get("step", instance.step)
+ validated_data = EscalationPolicyUpdateSerializer._drop_not_step_type_related_fields(step, validated_data)
+ validated_data = EscalationPolicyUpdateSerializer._convert_to_important_step_if_needed(validated_data)
+ return super().update(instance, validated_data)
+
+ @staticmethod
+ def _drop_not_step_type_related_fields(step, validated_data):
+ fields_to_set_none = [
+ WAIT_DELAY,
+ NOTIFY_SCHEDULE,
+ NOTIFY_TO_USERS_QUEUE,
+ NOTIFY_GROUP,
+ FROM_TIME,
+ TO_TIME,
+ NUM_ALERTS_IN_WINDOW,
+ NUM_MINUTES_IN_WINDOW,
+ CUSTOM_BUTTON_TRIGGER,
+ ]
+
+ for f in STEP_TYPE_TO_RELATED_FIELD_MAP.get(step, []):
+ fields_to_set_none.remove(f)
+
+ for f in fields_to_set_none:
+ if f == NOTIFY_TO_USERS_QUEUE:
+                # notify_to_users_queue is a many-to-many relation, so an empty list is used instead of None
+ validated_data[f] = []
+ else:
+ validated_data[f] = None
+
+ return validated_data
diff --git a/engine/apps/api/serializers/integration_heartbeat.py b/engine/apps/api/serializers/integration_heartbeat.py
new file mode 100644
index 0000000000..706afc55ec
--- /dev/null
+++ b/engine/apps/api/serializers/integration_heartbeat.py
@@ -0,0 +1,69 @@
+import humanize
+from django.conf import settings
+from django.template.loader import render_to_string
+from django.utils import timezone
+from rest_framework import serializers
+
+from apps.alerts.models import AlertReceiveChannel
+from apps.heartbeat.models import IntegrationHeartBeat
+from common.api_helpers.custom_fields import OrganizationFilteredPrimaryKeyRelatedField
+from common.api_helpers.mixins import EagerLoadingMixin
+
+NO_INSTRUCTION_MESSAGE = "No instruction"
+
+
+class IntegrationHeartBeatSerializer(EagerLoadingMixin, serializers.ModelSerializer):
+ id = serializers.CharField(read_only=True, source="public_primary_key")
+ alert_receive_channel = OrganizationFilteredPrimaryKeyRelatedField(queryset=AlertReceiveChannel.objects)
+ timeout_seconds = serializers.ChoiceField(
+ allow_null=False,
+ required=True,
+ choices=IntegrationHeartBeat.TIMEOUT_CHOICES,
+ )
+ last_heartbeat_time_verbal = serializers.SerializerMethodField()
+ instruction = serializers.SerializerMethodField()
+
+ SELECT_RELATED = ["alert_receive_channel"]
+
+ class Meta:
+ model = IntegrationHeartBeat
+ fields = [
+ "id",
+ "timeout_seconds",
+ "alert_receive_channel",
+ "link",
+ "last_heartbeat_time_verbal",
+ "status",
+ "instruction",
+ ]
+
+ def validate_alert_receive_channel(self, alert_receive_channel):
+ if alert_receive_channel.is_available_for_integration_heartbeat:
+ return alert_receive_channel
+ else:
+ raise serializers.ValidationError(
+ {"alert_receive_channel": "Heartbeat is not available for this integration"}
+ )
+
+ def get_last_heartbeat_time_verbal(self, obj):
+ return self._last_heartbeat_time_verbal(obj) if obj.last_heartbeat_time else None
+
+ def get_instruction(self, obj):
+ rendered_instruction = render_to_string(
+ obj.alert_receive_channel.heartbeat_instruction_template,
+ {
+ "heartbeat_url": obj.link,
+ "service_url": settings.BASE_URL,
+ },
+ )
+ return rendered_instruction
+
+ @staticmethod
+ def _last_heartbeat_time_verbal(instance):
+ """
+        This method exists to simplify testing.
+        To compare expected_payload with response.json(), "now" must be computed the same way in the test and in the serializer,
+        so it is easier to keep this in a separate method and mock it in tests.
+ """
+ now = timezone.now()
+ return humanize.naturaldelta(now - instance.last_heartbeat_time)
diff --git a/engine/apps/api/serializers/live_setting.py b/engine/apps/api/serializers/live_setting.py
new file mode 100644
index 0000000000..bf4313416c
--- /dev/null
+++ b/engine/apps/api/serializers/live_setting.py
@@ -0,0 +1,37 @@
+from rest_framework import serializers
+
+from apps.base.models import LiveSetting
+
+
+class LiveSettingSerializer(serializers.ModelSerializer):
+ id = serializers.CharField(read_only=True, source="public_primary_key")
+ value = serializers.JSONField(allow_null=False)
+
+ class Meta:
+ model = LiveSetting
+ fields = (
+ "id",
+ "name",
+ "description",
+ "default_value",
+ "value",
+ "error",
+ "is_secret",
+ )
+
+ def to_representation(self, instance):
+ ret = super().to_representation(instance)
+
+ def hide_secret(value):
+ # transform sensitive credentials to ******1234
+ prefix = 6 * "*"
+ return prefix + value[-4:]
+
+ if instance.is_secret:
+ if instance.value:
+ ret["value"] = hide_secret(instance.value)
+
+ if instance.default_value:
+ ret["default_value"] = hide_secret(instance.default_value)
+
+ return ret
diff --git a/engine/apps/api/serializers/organization.py b/engine/apps/api/serializers/organization.py
new file mode 100644
index 0000000000..4dc6940292
--- /dev/null
+++ b/engine/apps/api/serializers/organization.py
@@ -0,0 +1,175 @@
+from datetime import timedelta
+
+import humanize
+import pytz
+from django.apps import apps
+from django.conf import settings
+from django.utils import timezone
+from rest_framework import fields, serializers
+
+from apps.base.models import LiveSetting
+from apps.slack.models import SlackTeamIdentity
+from apps.slack.tasks import resolve_archived_incidents_for_organization, unarchive_incidents_for_organization
+from apps.user_management.models import Organization
+from common.api_helpers.mixins import EagerLoadingMixin
+
+
+class CustomDateField(fields.TimeField):
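+    # Parses an ISO-formatted datetime string to UTC and requires the date to be earlier than today.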
+ def to_internal_value(self, data):
+ try:
+ archive_datetime = timezone.datetime.fromisoformat(data).astimezone(pytz.UTC)
+ except (TypeError, ValueError):
+ raise serializers.ValidationError({"archive_alerts_from": ["Invalid date format"]})
+ if archive_datetime.date() >= timezone.now().date():
+            raise serializers.ValidationError({"archive_alerts_from": ["Invalid date. Date must be earlier than today."]})
+ return archive_datetime
+
+
+class FastSlackTeamIdentitySerializer(serializers.ModelSerializer):
+ class Meta:
+ model = SlackTeamIdentity
+ fields = ["cached_name"]
+
+
+class OrganizationSerializer(EagerLoadingMixin, serializers.ModelSerializer):
+ pk = serializers.CharField(read_only=True, source="public_primary_key")
+ slack_team_identity = FastSlackTeamIdentitySerializer(read_only=True)
+
+ name = serializers.CharField(required=False, allow_null=True, allow_blank=True, source="org_title")
+ # name_slug = serializers.CharField(required=False, allow_null=True, allow_blank=False)
+ maintenance_till = serializers.ReadOnlyField(source="till_maintenance_timestamp")
+ slack_channel = serializers.SerializerMethodField()
+
+ SELECT_RELATED = ["slack_team_identity"]
+
+ class Meta:
+ model = Organization
+ fields = [
+ "pk",
+ "name",
+ # "name_slug",
+ # "is_new_version",
+ "slack_team_identity",
+ "maintenance_mode",
+ "maintenance_till",
+ # "incident_retention_web_report",
+ # "number_of_employees",
+ "slack_channel",
+ ]
+ read_only_fields = [
+ "is_new_version",
+ "slack_team_identity",
+ "maintenance_mode",
+ "maintenance_till",
+ # "incident_retention_web_report",
+ ]
+
+ def get_slack_channel(self, obj):
+ SlackChannel = apps.get_model("slack", "SlackChannel")
+ if obj.general_log_channel_id is None or obj.slack_team_identity is None:
+ return None
+ try:
+ channel = obj.slack_team_identity.get_cached_channels().get(slack_id=obj.general_log_channel_id)
+ except SlackChannel.DoesNotExist:
+ return {"display_name": None, "slack_id": obj.general_log_channel_id, "id": None}
+
+ return {
+ "display_name": channel.name,
+ "slack_id": channel.slack_id,
+ "id": channel.public_primary_key,
+ }
+
+
+class CurrentOrganizationSerializer(OrganizationSerializer):
+ limits = serializers.SerializerMethodField()
+ env_status = serializers.SerializerMethodField()
+ banner = serializers.SerializerMethodField()
+
+ class Meta(OrganizationSerializer.Meta):
+ fields = [
+ *OrganizationSerializer.Meta.fields,
+ "limits",
+ "archive_alerts_from",
+ "is_resolution_note_required",
+ "env_status",
+ "banner",
+ ]
+ read_only_fields = [
+ *OrganizationSerializer.Meta.read_only_fields,
+ "limits",
+ "banner",
+ ]
+
+ def get_banner(self, obj):
+ DynamicSetting = apps.get_model("base", "DynamicSetting")
+ banner = DynamicSetting.objects.get_or_create(
+ name="banner",
+ defaults={"json_value": {"title": None, "body": None}},
+ )[0]
+ return banner.json_value
+
+ def get_limits(self, obj):
+ user = self.context["request"].user
+ return obj.notifications_limit_web_report(user)
+
+ def get_env_status(self, obj):
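+        # A messaging backend counts as configured when none of its live settings report an error.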
+ LiveSetting.populate_settings_if_needed()
+
+ telegram_configured = not LiveSetting.objects.filter(name__startswith="TELEGRAM", error__isnull=False).exists()
+ twilio_configured = not LiveSetting.objects.filter(name__startswith="TWILIO", error__isnull=False).exists()
+
+ return {
+ "telegram_configured": telegram_configured,
+ "twilio_configured": twilio_configured,
+ "extra_messaging_backends_enabled": settings.FEATURE_EXTRA_MESSAGING_BACKENDS_ENABLED,
+ }
+
+ def get_stats(self, obj):
+ if isinstance(obj.cached_seconds_saved_by_amixr, int):
+ verbal_time_saved_by_amixr = humanize.naturaldelta(timedelta(seconds=obj.cached_seconds_saved_by_amixr))
+ else:
+ verbal_time_saved_by_amixr = None
+
+ res = {
+ "grouped_percent": obj.cached_grouped_percent,
+ "alerts_count": obj.cached_alerts_count,
+ "noise_reduction": obj.cached_noise_reduction,
+ "average_response_time": humanize.naturaldelta(obj.cached_average_response_time),
+ "verbal_time_saved_by_amixr": verbal_time_saved_by_amixr,
+ }
+
+ return res
+
+ def update(self, instance, validated_data):
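+        # Changing the archive date triggers background tasks: un-archive incidents if the date
+        # moved earlier, then resolve incidents that fall into the archived range.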
+ current_archive_date = instance.archive_alerts_from
+ archive_alerts_from = validated_data.get("archive_alerts_from")
+
+ result = super().update(instance, validated_data)
+ if archive_alerts_from is not None and current_archive_date != archive_alerts_from:
+ if current_archive_date > archive_alerts_from:
+ unarchive_incidents_for_organization.apply_async(
+ (instance.pk,),
+ )
+ resolve_archived_incidents_for_organization.apply_async(
+ (instance.pk,),
+ )
+
+ return result
+
+
+class FastOrganizationSerializer(serializers.ModelSerializer):
+ pk = serializers.CharField(read_only=True, source="public_primary_key")
+ name = serializers.CharField(read_only=True, source="org_title")
+
+ class Meta:
+ model = Organization
+ fields = ["pk", "name"]
+
+
+class PluginOrganizationSerializer(serializers.ModelSerializer):
+ pk = serializers.CharField(read_only=True, source="public_primary_key")
+ grafana_token = serializers.CharField(write_only=True, source="api_token")
+
+ class Meta:
+ model = Organization
+ fields = ["pk", "stack_id", "stack_slug", "grafana_url", "org_id", "org_slug", "org_title", "grafana_token"]
diff --git a/engine/apps/api/serializers/organization_log_record.py b/engine/apps/api/serializers/organization_log_record.py
new file mode 100644
index 0000000000..feb52f50e5
--- /dev/null
+++ b/engine/apps/api/serializers/organization_log_record.py
@@ -0,0 +1,38 @@
+from emoji import emojize
+from rest_framework import serializers
+
+from apps.base.models import OrganizationLogRecord
+from common.api_helpers.mixins import EagerLoadingMixin
+
+
+class OrganizationLogRecordSerializer(EagerLoadingMixin, serializers.ModelSerializer):
+ id = serializers.CharField(read_only=True, source="public_primary_key")
+ author = serializers.SerializerMethodField()
+ description = serializers.SerializerMethodField()
+
+ class Meta:
+ model = OrganizationLogRecord
+ fields = [
+ "id",
+ "author",
+ "created_at",
+ "description",
+ "labels",
+ ]
+
+ read_only_fields = fields.copy()
+
+ PREFETCH_RELATED = [
+ "author__organization",
+ # "author__slack_user_identities__slack_team_identity__amixr_team",
+ ]
+
+ SELECT_RELATED = ["author", "organization"]
+
+ def get_author(self, obj):
+ if obj.author:
+ user_data = obj.author.short()
+ return user_data
+
+ def get_description(self, obj):
+ return emojize(obj.description, use_aliases=True).replace("\n", " ")
diff --git a/engine/apps/api/serializers/organization_slack_settings.py b/engine/apps/api/serializers/organization_slack_settings.py
new file mode 100644
index 0000000000..f1b628132f
--- /dev/null
+++ b/engine/apps/api/serializers/organization_slack_settings.py
@@ -0,0 +1,20 @@
+from rest_framework import serializers
+
+from apps.user_management.models import Organization
+
+
+class OrganizationSlackSettingsSerializer(serializers.ModelSerializer):
+ pk = serializers.CharField(read_only=True, source="public_primary_key")
+
+ class Meta:
+ model = Organization
+ fields = [
+ "pk",
+ "acknowledge_remind_timeout",
+ "unacknowledge_timeout",
+ ]
+
+ def update(self, instance, validated_data):
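+        # Disabling acknowledge reminders leaves nothing to auto-unacknowledge, so reset that timeout too.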
+ if validated_data.get("acknowledge_remind_timeout") == 0:
+ validated_data["unacknowledge_timeout"] = 0
+ return super().update(instance, validated_data)
diff --git a/engine/apps/api/serializers/public_api_token.py b/engine/apps/api/serializers/public_api_token.py
new file mode 100644
index 0000000000..f3f0aaf05b
--- /dev/null
+++ b/engine/apps/api/serializers/public_api_token.py
@@ -0,0 +1,13 @@
+from rest_framework import serializers
+
+from apps.auth_token.models import ApiAuthToken
+
+
+class PublicApiTokenSerializer(serializers.ModelSerializer):
+ class Meta:
+ model = ApiAuthToken
+ fields = [
+ "id",
+ "name",
+ "created_at",
+ ]
diff --git a/engine/apps/api/serializers/resolution_note.py b/engine/apps/api/serializers/resolution_note.py
new file mode 100644
index 0000000000..330259e3f1
--- /dev/null
+++ b/engine/apps/api/serializers/resolution_note.py
@@ -0,0 +1,64 @@
+from rest_framework import serializers
+
+from apps.alerts.models import AlertGroup, ResolutionNote
+from apps.alerts.tasks import invalidate_web_cache_for_alert_group
+from apps.api.serializers.user import FastUserSerializer
+from common.api_helpers.custom_fields import OrganizationFilteredPrimaryKeyRelatedField
+from common.api_helpers.exceptions import BadRequest
+from common.api_helpers.mixins import EagerLoadingMixin
+
+
+class ResolutionNoteSerializer(EagerLoadingMixin, serializers.ModelSerializer):
+ id = serializers.CharField(read_only=True, source="public_primary_key")
+ alert_group = OrganizationFilteredPrimaryKeyRelatedField(
+ filter_field="channel__organization",
+ queryset=AlertGroup.unarchived_objects,
+ )
+ text = serializers.CharField(allow_null=False, source="message_text")
+ author = FastUserSerializer(read_only=True)
+
+ SELECT_RELATED = ["resolution_note_slack_message", "author"]
+
+ class Meta:
+ model = ResolutionNote
+ fields = [
+ "id",
+ "alert_group",
+ "source",
+ "author",
+ "created_at",
+ "text",
+ ]
+ read_only_fields = [
+ "author",
+ "created_at",
+ "source",
+ ]
+
+ def create(self, validated_data):
+ validated_data["author"] = self.context["request"].user
+ validated_data["source"] = ResolutionNote.Source.WEB
+ created_instance = super().create(validated_data)
+        # Invalidate alert group cache because resolution notes are shown in the alert group's timeline
+ created_instance.alert_group.drop_cached_after_resolve_report_json()
+ invalidate_web_cache_for_alert_group(alert_group_pk=created_instance.alert_group.pk)
+ return created_instance
+
+ def to_representation(self, instance):
+ result = super().to_representation(instance)
+ result["text"] = instance.text
+ result["source"] = {"id": instance.source, "display_name": instance.get_source_display()}
+ return result
+
+
+class ResolutionNoteUpdateSerializer(ResolutionNoteSerializer):
+ alert_group = OrganizationFilteredPrimaryKeyRelatedField(read_only=True)
+
+ def update(self, instance, validated_data):
+ if instance.source != ResolutionNote.Source.WEB:
+ raise BadRequest(detail="Cannot update message with this source type")
+ updated_instance = super().update(instance, validated_data)
+        # Invalidate alert group cache because resolution notes are shown in the alert group's timeline
+ updated_instance.alert_group.drop_cached_after_resolve_report_json()
+ invalidate_web_cache_for_alert_group(alert_group_pk=updated_instance.alert_group.pk)
+ return updated_instance
diff --git a/engine/apps/api/serializers/schedule_base.py b/engine/apps/api/serializers/schedule_base.py
new file mode 100644
index 0000000000..b9e07a24a4
--- /dev/null
+++ b/engine/apps/api/serializers/schedule_base.py
@@ -0,0 +1,85 @@
+from django.utils import timezone
+from rest_framework import serializers
+
+from apps.api.serializers.user_group import UserGroupSerializer
+from apps.schedules.ical_utils import list_users_to_notify_from_ical
+from apps.schedules.tasks import schedule_notify_about_empty_shifts_in_schedule, schedule_notify_about_gaps_in_schedule
+from common.api_helpers.custom_fields import TeamPrimaryKeyRelatedField
+from common.api_helpers.mixins import EagerLoadingMixin
+from common.api_helpers.utils import CurrentOrganizationDefault, CurrentTeamDefault
+
+
+class ScheduleBaseSerializer(EagerLoadingMixin, serializers.ModelSerializer):
+ id = serializers.CharField(read_only=True, source="public_primary_key")
+ organization = serializers.HiddenField(default=CurrentOrganizationDefault())
+ team = TeamPrimaryKeyRelatedField(allow_null=True, default=CurrentTeamDefault())
+ slack_channel = serializers.SerializerMethodField()
+ user_group = UserGroupSerializer()
+ warnings = serializers.SerializerMethodField()
+ on_call_now = serializers.SerializerMethodField()
+
+ class Meta:
+ fields = [
+ "id",
+ "organization",
+ "team",
+ "name",
+ "user_group",
+ "warnings",
+ "on_call_now",
+ "has_gaps",
+ "notify_oncall_shift_freq",
+ "notify_empty_oncall",
+ "mention_oncall_start",
+ "mention_oncall_next",
+ ]
+
+ SELECT_RELATED = ["organization"]
+
+ CANT_UPDATE_USER_GROUP_WARNING = (
+ "Cannot update the user group, make sure to grant user group modification rights to "
+ "non-admin users in Slack workspace settings"
+ )
+    SCHEDULE_HAS_GAPS_WARNING = "Schedule has unassigned time periods during the next 7 days"
+    SCHEDULE_HAS_EMPTY_SHIFTS_WARNING = "Schedule has empty shifts during the next 7 days"
+
+ def get_slack_channel(self, obj):
+ if obj.channel is None:
+ return None
+ return {
+ "display_name": obj.slack_channel_name,
+ "slack_id": obj.channel,
+ "id": obj.slack_channel_pk,
+ }
+
+ def get_warnings(self, obj):
+ can_update_user_groups = self.context.get("can_update_user_groups", False)
+ warnings = []
+ if obj.user_group and not can_update_user_groups:
+ warnings.append(self.CANT_UPDATE_USER_GROUP_WARNING)
+ if obj.has_gaps:
+ warnings.append(self.SCHEDULE_HAS_GAPS_WARNING)
+ if obj.has_empty_shifts:
+ warnings.append(self.SCHEDULE_HAS_EMPTY_SHIFTS_WARNING)
+ return warnings
+
+ def get_on_call_now(self, obj):
+ users_on_call = list_users_to_notify_from_ical(obj, timezone.datetime.now(timezone.utc))
+ if users_on_call is not None:
+ return [user.short() for user in users_on_call]
+ else:
+ return []
+
+ def validate(self, attrs):
+ if "slack_channel_id" in attrs:
+ slack_channel_id = attrs.pop("slack_channel_id", None)
+ attrs["channel"] = slack_channel_id.slack_id if slack_channel_id is not None else None
+ return attrs
+
+ def create(self, validated_data):
+ created_schedule = super().create(validated_data)
+ created_schedule.check_empty_shifts_for_next_week()
+ schedule_notify_about_empty_shifts_in_schedule.apply_async((created_schedule.pk,))
+ created_schedule.check_gaps_for_next_week()
+ schedule_notify_about_gaps_in_schedule.apply_async((created_schedule.pk,))
+ return created_schedule
diff --git a/engine/apps/api/serializers/schedule_calendar.py b/engine/apps/api/serializers/schedule_calendar.py
new file mode 100644
index 0000000000..6a231962f0
--- /dev/null
+++ b/engine/apps/api/serializers/schedule_calendar.py
@@ -0,0 +1,57 @@
+from rest_framework import serializers
+
+from apps.api.serializers.schedule_base import ScheduleBaseSerializer
+from apps.schedules.models import OnCallScheduleCalendar
+from apps.schedules.tasks import schedule_notify_about_empty_shifts_in_schedule, schedule_notify_about_gaps_in_schedule
+from apps.slack.models import SlackChannel, SlackUserGroup
+from common.api_helpers.custom_fields import OrganizationFilteredPrimaryKeyRelatedField
+from common.api_helpers.utils import validate_ical_url
+
+
+class ScheduleCalendarSerializer(ScheduleBaseSerializer):
+ time_zone = serializers.CharField(required=False)
+
+ class Meta:
+ model = OnCallScheduleCalendar
+ fields = [*ScheduleBaseSerializer.Meta.fields, "slack_channel", "time_zone", "ical_url_overrides"]
+
+ def validate_ical_url_overrides(self, url):
+ return validate_ical_url(url)
+
+
+class ScheduleCalendarCreateSerializer(ScheduleCalendarSerializer):
+ slack_channel_id = OrganizationFilteredPrimaryKeyRelatedField(
+ filter_field="slack_team_identity__organizations",
+ queryset=SlackChannel.objects,
+ required=False,
+ allow_null=True,
+ )
+ user_group = OrganizationFilteredPrimaryKeyRelatedField(
+ filter_field="slack_team_identity__organizations",
+ queryset=SlackUserGroup.objects,
+ required=False,
+ allow_null=True,
+ )
+
+ class Meta(ScheduleCalendarSerializer.Meta):
+ fields = [*ScheduleBaseSerializer.Meta.fields, "slack_channel_id", "time_zone", "ical_url_overrides"]
+ extra_kwargs = {
+ "ical_url_overrides": {"required": False, "allow_null": True},
+ }
+
+ def update(self, instance, validated_data):
+ old_ical_url_overrides = instance.ical_url_overrides
+ old_time_zone = instance.time_zone
+
+ updated_schedule = super().update(instance, validated_data)
+
+ updated_ical_url_overrides = updated_schedule.ical_url_overrides
+ updated_time_zone = updated_schedule.time_zone
+
+ if old_time_zone != updated_time_zone or old_ical_url_overrides != updated_ical_url_overrides:
+ updated_schedule.drop_cached_ical()
+ updated_schedule.check_empty_shifts_for_next_week()
+ updated_schedule.check_gaps_for_next_week()
+ schedule_notify_about_empty_shifts_in_schedule.apply_async((instance.pk,))
+ schedule_notify_about_gaps_in_schedule.apply_async((instance.pk,))
+ return updated_schedule
diff --git a/engine/apps/api/serializers/schedule_ical.py b/engine/apps/api/serializers/schedule_ical.py
new file mode 100644
index 0000000000..3852f6627c
--- /dev/null
+++ b/engine/apps/api/serializers/schedule_ical.py
@@ -0,0 +1,83 @@
+from apps.api.serializers.schedule_base import ScheduleBaseSerializer
+from apps.schedules.models import OnCallScheduleICal
+from apps.schedules.tasks import schedule_notify_about_empty_shifts_in_schedule, schedule_notify_about_gaps_in_schedule
+from apps.slack.models import SlackChannel, SlackUserGroup
+from common.api_helpers.custom_fields import OrganizationFilteredPrimaryKeyRelatedField
+from common.api_helpers.utils import validate_ical_url
+
+
+class ScheduleICalSerializer(ScheduleBaseSerializer):
+ class Meta:
+ model = OnCallScheduleICal
+ fields = [
+ *ScheduleBaseSerializer.Meta.fields,
+ "ical_url_primary",
+ "ical_url_overrides",
+ "slack_channel",
+ ]
+
+ def validate_ical_url_primary(self, url):
+ return validate_ical_url(url)
+
+ def validate_ical_url_overrides(self, url):
+ return validate_ical_url(url)
+
+
+class ScheduleICalCreateSerializer(ScheduleICalSerializer):
+ slack_channel_id = OrganizationFilteredPrimaryKeyRelatedField(
+ filter_field="slack_team_identity__organizations",
+ queryset=SlackChannel.objects,
+ required=False,
+ allow_null=True,
+ )
+ user_group = OrganizationFilteredPrimaryKeyRelatedField(
+ filter_field="slack_team_identity__organizations",
+ queryset=SlackUserGroup.objects,
+ required=False,
+ allow_null=True,
+ )
+
+ class Meta:
+ model = OnCallScheduleICal
+ fields = [
+ *ScheduleBaseSerializer.Meta.fields,
+ "ical_url_primary",
+ "ical_url_overrides",
+ "slack_channel_id",
+ ]
+ extra_kwargs = {
+ "ical_url_primary": {"required": True, "allow_null": False},
+ "ical_url_overrides": {"required": False, "allow_null": True},
+ }
+
+
+class ScheduleICalUpdateSerializer(ScheduleICalCreateSerializer):
+ class Meta:
+ model = OnCallScheduleICal
+ fields = [
+ *ScheduleBaseSerializer.Meta.fields,
+ "ical_url_primary",
+ "ical_url_overrides",
+ "slack_channel_id",
+ ]
+ extra_kwargs = {
+ "ical_url_primary": {"required": False, "allow_null": False},
+ "ical_url_overrides": {"required": False, "allow_null": True},
+ }
+
+ def update(self, instance, validated_data):
+ old_ical_url_primary = instance.ical_url_primary
+ old_ical_url_overrides = instance.ical_url_overrides
+
+ updated_schedule = super().update(instance, validated_data)
+
+ updated_ical_url_primary = updated_schedule.ical_url_primary
+ updated_ical_url_overrides = updated_schedule.ical_url_overrides
+
+ if old_ical_url_primary != updated_ical_url_primary or old_ical_url_overrides != updated_ical_url_overrides:
+ updated_schedule.drop_cached_ical()
+ updated_schedule.check_empty_shifts_for_next_week()
+ updated_schedule.check_gaps_for_next_week()
+ schedule_notify_about_empty_shifts_in_schedule.apply_async((instance.pk,))
+ schedule_notify_about_gaps_in_schedule.apply_async((instance.pk,))
+ return updated_schedule
diff --git a/engine/apps/api/serializers/schedule_polymorphic.py b/engine/apps/api/serializers/schedule_polymorphic.py
new file mode 100644
index 0000000000..b9d404ed77
--- /dev/null
+++ b/engine/apps/api/serializers/schedule_polymorphic.py
@@ -0,0 +1,42 @@
+from rest_polymorphic.serializers import PolymorphicSerializer
+
+from apps.api.serializers.schedule_calendar import ScheduleCalendarCreateSerializer, ScheduleCalendarSerializer
+from apps.api.serializers.schedule_ical import (
+ ScheduleICalCreateSerializer,
+ ScheduleICalSerializer,
+ ScheduleICalUpdateSerializer,
+)
+from apps.schedules.models import OnCallScheduleCalendar, OnCallScheduleICal
+from common.api_helpers.mixins import EagerLoadingMixin
+
+
+class PolymorphicScheduleSerializer(EagerLoadingMixin, PolymorphicSerializer):
+ SELECT_RELATED = ["organization"]
+
+ resource_type_field_name = "type"
+
+ model_serializer_mapping = {
+ OnCallScheduleICal: ScheduleICalSerializer,
+ OnCallScheduleCalendar: ScheduleCalendarSerializer,
+ }
+
+ SCHEDULE_CLASS_TO_TYPE = {OnCallScheduleCalendar: 0, OnCallScheduleICal: 1}
+
+ def to_resource_type(self, model_or_instance):
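+        # Map the concrete schedule model to the integer type value exposed by the API.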
+ return self.SCHEDULE_CLASS_TO_TYPE.get(model_or_instance._meta.model)
+
+
+class PolymorphicScheduleCreateSerializer(PolymorphicScheduleSerializer):
+
+ model_serializer_mapping = {
+ OnCallScheduleICal: ScheduleICalCreateSerializer,
+ OnCallScheduleCalendar: ScheduleCalendarCreateSerializer,
+ }
+
+
+class PolymorphicScheduleUpdateSerializer(PolymorphicScheduleSerializer):
+ model_serializer_mapping = {
+ OnCallScheduleICal: ScheduleICalUpdateSerializer,
+        # There is no difference between the create and update serializers for ScheduleCalendar
+ OnCallScheduleCalendar: ScheduleCalendarCreateSerializer,
+ }
diff --git a/engine/apps/api/serializers/schedule_reminder.py b/engine/apps/api/serializers/schedule_reminder.py
new file mode 100644
index 0000000000..a6fbad4094
--- /dev/null
+++ b/engine/apps/api/serializers/schedule_reminder.py
@@ -0,0 +1,15 @@
+from rest_framework import serializers
+
+from apps.schedules.models import OnCallSchedule
+
+
+class ScheduleReminderSerializer(serializers.ModelSerializer):
+ class Meta:
+ model = OnCallSchedule
+ fields = [
+ "id",
+ "notify_oncall_shift_freq",
+ "mention_oncall_start",
+ "mention_oncall_next",
+ "notify_empty_oncall",
+ ]
diff --git a/engine/apps/api/serializers/slack_channel.py b/engine/apps/api/serializers/slack_channel.py
new file mode 100644
index 0000000000..4447dd8ae7
--- /dev/null
+++ b/engine/apps/api/serializers/slack_channel.py
@@ -0,0 +1,13 @@
+from rest_framework import serializers
+
+from apps.slack.models import SlackChannel
+
+
+class SlackChannelSerializer(serializers.ModelSerializer):
+ id = serializers.CharField(read_only=True, source="public_primary_key")
+ display_name = serializers.CharField(source="name")
+
+ class Meta:
+ model = SlackChannel
+ fields = ["id", "display_name", "slack_id"]
+ read_only_fields = ["id", "display_name", "slack_id"]
diff --git a/engine/apps/api/serializers/slack_user_identity.py b/engine/apps/api/serializers/slack_user_identity.py
new file mode 100644
index 0000000000..be184cef89
--- /dev/null
+++ b/engine/apps/api/serializers/slack_user_identity.py
@@ -0,0 +1,18 @@
+from rest_framework import serializers
+
+from apps.slack.models import SlackUserIdentity
+
+
+class SlackUserIdentitySerializer(serializers.ModelSerializer):
+ slack_login = serializers.CharField(read_only=True, source="cached_slack_login")
+ avatar = serializers.CharField(read_only=True, source="cached_avatar")
+ name = serializers.CharField(read_only=True, source="cached_name")
+ display_name = serializers.SerializerMethodField()
+
+ class Meta:
+ model = SlackUserIdentity
+ fields = ["slack_login", "slack_id", "avatar", "name", "display_name"]
+ read_only_fields = ["slack_login", "slack_id", "avatar", "name", "display_name"]
+
+ def get_display_name(self, obj):
+ return obj.profile_display_name or obj.slack_verbal
diff --git a/engine/apps/api/serializers/team.py b/engine/apps/api/serializers/team.py
new file mode 100644
index 0000000000..dcccf527c8
--- /dev/null
+++ b/engine/apps/api/serializers/team.py
@@ -0,0 +1,16 @@
+from rest_framework import serializers
+
+from apps.user_management.models import Team
+
+
+class TeamSerializer(serializers.ModelSerializer):
+ id = serializers.CharField(read_only=True, source="public_primary_key")
+
+ class Meta:
+ model = Team
+ fields = (
+ "id",
+ "name",
+ "email",
+ "avatar_url",
+ )
diff --git a/engine/apps/api/serializers/telegram.py b/engine/apps/api/serializers/telegram.py
new file mode 100644
index 0000000000..3d8e0a2452
--- /dev/null
+++ b/engine/apps/api/serializers/telegram.py
@@ -0,0 +1,24 @@
+from rest_framework import serializers
+
+from apps.telegram.models import TelegramToOrganizationConnector, TelegramToUserConnector
+
+
+class TelegramToUserConnectorSerializer(serializers.ModelSerializer):
+ class Meta:
+ model = TelegramToUserConnector
+ fields = ["telegram_nick_name", "telegram_chat_id"]
+
+
+class TelegramToOrganizationConnectorSerializer(serializers.ModelSerializer):
+ id = serializers.CharField(read_only=True, source="public_primary_key")
+
+ class Meta:
+ model = TelegramToOrganizationConnector
+ fields = [
+ "id",
+ "channel_chat_id",
+ "channel_name",
+ "discussion_group_chat_id",
+ "discussion_group_name",
+ "is_default_channel",
+ ]
diff --git a/engine/apps/api/serializers/user.py b/engine/apps/api/serializers/user.py
new file mode 100644
index 0000000000..e9ec91b2dc
--- /dev/null
+++ b/engine/apps/api/serializers/user.py
@@ -0,0 +1,136 @@
+from rest_framework import serializers
+
+from apps.api.serializers.telegram import TelegramToUserConnectorSerializer
+from apps.base.constants import ADMIN_PERMISSIONS, ALL_ROLES_PERMISSIONS, EDITOR_PERMISSIONS
+from apps.base.messaging import get_messaging_backends
+from apps.base.models import UserNotificationPolicy
+from apps.twilioapp.utils import check_phone_number_is_valid
+from apps.user_management.models import User
+from common.api_helpers.custom_fields import TeamPrimaryKeyRelatedField
+from common.api_helpers.mixins import EagerLoadingMixin
+from common.constants.role import Role
+
+from .custom_serializers import DynamicFieldsModelSerializer
+from .organization import FastOrganizationSerializer
+from .slack_user_identity import SlackUserIdentitySerializer
+
+
+class UserSerializer(DynamicFieldsModelSerializer, EagerLoadingMixin):
+ pk = serializers.CharField(read_only=True, source="public_primary_key")
+ slack_user_identity = SlackUserIdentitySerializer(read_only=True)
+
+ telegram_configuration = TelegramToUserConnectorSerializer(source="telegram_connection", read_only=True)
+
+ messaging_backends = serializers.SerializerMethodField()
+
+ organization = FastOrganizationSerializer(read_only=True)
+ current_team = TeamPrimaryKeyRelatedField(allow_null=True, required=False)
+
+ avatar = serializers.URLField(source="avatar_url", read_only=True)
+
+ permissions = serializers.SerializerMethodField()
+ notification_chain_verbal = serializers.SerializerMethodField()
+
+ SELECT_RELATED = ["telegram_verification_code", "telegram_connection", "organization", "slack_user_identity"]
+
+ class Meta:
+ model = User
+ fields = [
+ "pk",
+ "organization",
+ "current_team",
+ "email",
+ "username",
+ "role",
+ "avatar",
+ "unverified_phone_number",
+ "verified_phone_number",
+ "slack_user_identity",
+ "telegram_configuration",
+ "messaging_backends",
+ "permissions",
+ "notification_chain_verbal",
+ ]
+ read_only_fields = [
+ "email",
+ "username",
+ "role",
+ "verified_phone_number",
+ ]
+
+ def validate_unverified_phone_number(self, value):
+ if value:
+ if check_phone_number_is_valid(value):
+ return value
+ else:
+ raise serializers.ValidationError(
+ "Phone number must be entered in the format: '+999999999'. From 8 to 15 digits allowed."
+ )
+ else:
+ return None
+
+ def get_messaging_backends(self, obj):
+ serialized_data = {}
+ supported_backends = get_messaging_backends()
+ for backend_id, backend in supported_backends:
+ serialized_data[backend_id] = backend.serialize_user(obj)
+ return serialized_data
+
+ def get_permissions(self, obj):
+ if obj.role == Role.ADMIN:
+ return ADMIN_PERMISSIONS
+ elif obj.role == Role.EDITOR:
+ return EDITOR_PERMISSIONS
+ else:
+ return ALL_ROLES_PERMISSIONS
+
+ def get_notification_chain_verbal(self, obj):
+ default, important = UserNotificationPolicy.get_short_verbals_for_user(user=obj)
+ return {"default": " - ".join(default), "important": " - ".join(important)}
+
+
+class UserHiddenFieldsSerializer(UserSerializer):
+ available_for_all_roles_fields = [
+ "pk",
+ "organization",
+ "current_team",
+ "username",
+ "avatar",
+ "notification_chain_verbal",
+ "permissions",
+ ]
+
+ def to_representation(self, instance):
+ ret = super(UserSerializer, self).to_representation(instance)
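+        # Mask every field that is not whitelisted for all roles.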
+ for field in ret:
+ if field not in self.available_for_all_roles_fields:
+ ret[field] = "******"
+ return ret
+
+
+class FastUserSerializer(serializers.ModelSerializer):
+ pk = serializers.CharField(source="public_primary_key")
+
+ class Meta:
+ model = User
+ fields = [
+ "pk",
+ "username",
+ ]
+ read_only_fields = [
+ "pk",
+ "username",
+ ]
+
+
+class FilterUserSerializer(EagerLoadingMixin, serializers.ModelSerializer):
+ value = serializers.CharField(source="public_primary_key")
+ display_name = serializers.CharField(source="username")
+
+ class Meta:
+ model = User
+ fields = ["value", "display_name"]
+ read_only_fields = [
+ "pk",
+ "username",
+ ]
diff --git a/engine/apps/api/serializers/user_group.py b/engine/apps/api/serializers/user_group.py
new file mode 100644
index 0000000000..eee9c4ab75
--- /dev/null
+++ b/engine/apps/api/serializers/user_group.py
@@ -0,0 +1,11 @@
+from rest_framework import serializers
+
+from apps.slack.models import SlackUserGroup
+
+
+class UserGroupSerializer(serializers.ModelSerializer):
+ id = serializers.CharField(read_only=True, source="public_primary_key")
+
+ class Meta:
+ model = SlackUserGroup
+ fields = ("id", "name", "handle")
diff --git a/engine/apps/api/serializers/user_notification_policy.py b/engine/apps/api/serializers/user_notification_policy.py
new file mode 100644
index 0000000000..383414290d
--- /dev/null
+++ b/engine/apps/api/serializers/user_notification_policy.py
@@ -0,0 +1,134 @@
+import time
+from datetime import timedelta
+
+from rest_framework import serializers
+
+from apps.base.models import UserNotificationPolicy
+from apps.base.models.user_notification_policy import NotificationChannelAPIOptions
+from apps.user_management.models import User
+from common.api_helpers.custom_fields import OrganizationFilteredPrimaryKeyRelatedField
+from common.api_helpers.exceptions import BadRequest, Forbidden
+from common.api_helpers.mixins import EagerLoadingMixin
+
+
+# This serializer should not be used directly
+class UserNotificationPolicyBaseSerializer(EagerLoadingMixin, serializers.ModelSerializer):
+ id = serializers.CharField(read_only=True, source="public_primary_key")
+ notify_by = serializers.ChoiceField(
+ read_only=False,
+ required=False,
+ default=UserNotificationPolicy.NotificationChannel.SLACK,
+ choices=NotificationChannelAPIOptions.AVAILABLE_FOR_USE,
+ )
+ step = serializers.ChoiceField(
+ read_only=False,
+ required=False,
+ default=UserNotificationPolicy.Step.NOTIFY,
+ choices=UserNotificationPolicy.Step.choices,
+ )
+
+ SELECT_RELATED = [
+ "user",
+ ]
+
+ class Meta:
+ model = UserNotificationPolicy
+ fields = ["id", "step", "order", "notify_by", "wait_delay", "important", "user"]
+
+ def to_internal_value(self, data):
+ if data.get("wait_delay", None):
+ try:
+ time.strptime(data["wait_delay"], "%H:%M:%S")
+ except ValueError:
+ try:
+ data["wait_delay"] = str(timedelta(seconds=float(data["wait_delay"])))
+ except ValueError:
+ raise serializers.ValidationError("Invalid wait delay format")
+ data = self._notify_by_to_internal_value(data)
+ return super().to_internal_value(data)
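+ # e.g. a wait_delay of "0:05:00" is accepted as-is, while plain seconds like "300"
+ # are normalized via str(timedelta(seconds=300.0)) == "0:05:00"; anything else
+ # raises "Invalid wait delay format".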
+
+ def to_representation(self, instance):
+ result = super().to_representation(instance)
+ result = self._notify_by_to_representation(instance, result)
+ return result
+
+ # _notify_by_to_internal_value and _notify_by_to_representation exist because the notify_by field
+ # on the UserNotificationPolicy model has the default value NotificationChannel.SLACK and is not nullable.
+ # We don't want any notify_by value in the response if step != Step.NOTIFY
+ def _notify_by_to_internal_value(self, data):
+ if not data.get("notify_by", None):
+ data["notify_by"] = UserNotificationPolicy.NotificationChannel.SLACK
+ return data
+
+ def _notify_by_to_representation(self, instance, result):
+ if instance.step != UserNotificationPolicy.Step.NOTIFY:
+ result["notify_by"] = None
+ return result
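+ # e.g. a WAIT or other non-NOTIFY step is represented with "notify_by": null, while
+ # a NOTIFY step keeps its channel value.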
+
+
+class UserNotificationPolicySerializer(UserNotificationPolicyBaseSerializer):
+ prev_step = serializers.CharField(required=False, write_only=True, allow_null=True)
+ user = OrganizationFilteredPrimaryKeyRelatedField(
+ queryset=User.objects,
+ required=False,
+ allow_null=True,
+ many=False,
+ display_func=lambda instance: instance.username,
+ )
+ notify_by = serializers.ChoiceField(
+ choices=NotificationChannelAPIOptions.AVAILABLE_FOR_USE,
+ default=NotificationChannelAPIOptions.DEFAULT_NOTIFICATION_CHANNEL,
+ )
+
+ class Meta(UserNotificationPolicyBaseSerializer.Meta):
+ fields = [*UserNotificationPolicyBaseSerializer.Meta.fields, "prev_step"]
+ read_only_fields = ("order",)
+
+ def create(self, validated_data):
+ prev_step = validated_data.pop("prev_step", None)
+
+ user = validated_data.get("user")
+ organization = self.context["request"].auth.organization
+
+ if not user:
+ user = self.context["request"].user
+
+ self_or_admin = user.self_or_admin(user_to_check=self.context["request"].user, organization=organization)
+ if not self_or_admin:
+ raise Forbidden()
+
+ if prev_step is not None:
+ try:
+ prev_step = UserNotificationPolicy.objects.get(public_primary_key=prev_step)
+ except UserNotificationPolicy.DoesNotExist:
+ raise BadRequest(detail="Prev step does not exist")
+ if prev_step.user != user or prev_step.important != validated_data.get("important", False):
+ raise BadRequest(detail="UserNotificationPolicy can be created only with the same user and importance")
+ instance = UserNotificationPolicy.objects.create(**validated_data)
+ instance.to(prev_step.order + 1)
+ return instance
+ else:
+ instance = UserNotificationPolicy.objects.create(**validated_data)
+ return instance
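+ # e.g. passing prev_step=<public primary key of an existing policy> inserts the new
+ # policy right after it (at prev_step.order + 1); without prev_step the new policy
+ # keeps the default order assigned on create.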
+
+
+class UserNotificationPolicyUpdateSerializer(UserNotificationPolicyBaseSerializer):
+
+ user = OrganizationFilteredPrimaryKeyRelatedField(
+ many=False,
+ read_only=True,
+ display_func=lambda instance: instance.username,
+ )
+
+ class Meta(UserNotificationPolicyBaseSerializer.Meta):
+ read_only_fields = ("order", "user", "important")
+
+ def update(self, instance, validated_data):
+ self_or_admin = instance.user.self_or_admin(
+ user_to_check=self.context["request"].user, organization=self.context["request"].user.organization
+ )
+ if not self_or_admin:
+ raise Forbidden()
+ if validated_data.get("step") == UserNotificationPolicy.Step.WAIT and not validated_data.get("wait_delay"):
+ validated_data["wait_delay"] = UserNotificationPolicy.FIVE_MINUTES
+ return super().update(instance, validated_data)
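+ # e.g. switching a policy to a WAIT step without an explicit wait_delay falls back to
+ # UserNotificationPolicy.FIVE_MINUTES.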
diff --git a/engine/apps/api/tasks.py b/engine/apps/api/tasks.py
new file mode 100644
index 0000000000..4240178adb
--- /dev/null
+++ b/engine/apps/api/tasks.py
@@ -0,0 +1,55 @@
+from celery.utils.log import get_task_logger
+from django.apps import apps
+from django.conf import settings
+from django.core.cache import cache
+
+from common.custom_celery_tasks import shared_dedicated_queue_retry_task
+
+logger = get_task_logger(__name__)
+
+
+def get_cache_key_caching_alert_group_for_web(alert_group_pk):
+ CACHE_KEY_PREFIX = "cache_alert_group_for_web"
+ return f"{CACHE_KEY_PREFIX}_{alert_group_pk}"
+
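+# e.g. get_cache_key_caching_alert_group_for_web(42) -> "cache_alert_group_for_web_42"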
+
+# TODO: remove these tasks after all of them have been processed in prod
+@shared_dedicated_queue_retry_task(
+ autoretry_for=(Exception,), retry_backoff=True, max_retries=0 if settings.DEBUG else None
+)
+def schedule_cache_for_alert_group(alert_group_pk):
+ CACHE_FOR_ALERT_GROUP_LIFETIME = 60
+ START_CACHE_DELAY = 5 # we introduce a delay to avoid re-caching after every alert
+
+ task = cache_alert_group_for_web.apply_async(args=[alert_group_pk], countdown=START_CACHE_DELAY)
+ cache_key = get_cache_key_caching_alert_group_for_web(alert_group_pk)
+ cache.set(cache_key, task.id, timeout=CACHE_FOR_ALERT_GROUP_LIFETIME)
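+ # Each new alert re-schedules this task and overwrites the cached task id, so of
+ # several queued tasks only the most recently scheduled one (whose id still matches
+ # the cache) will refresh the web cache; this acts as a simple debounce.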
+
+
+@shared_dedicated_queue_retry_task(
+ autoretry_for=(Exception,), retry_backoff=True, max_retries=0 if settings.DEBUG else None
+)
+def cache_alert_group_for_web(alert_group_pk):
+ """
+ Async task to re-cache alert_group for web.
+ """
+ cache_key = get_cache_key_caching_alert_group_for_web(alert_group_pk)
+ cached_task_id = cache.get(cache_key)
+ current_task_id = cache_alert_group_for_web.request.id
+
+ if cached_task_id is None:
+ return (
+ f"cache_alert_group_for_web skipped, because the task_id ({current_task_id})"
+ f" for alert_group {alert_group_pk} doesn't exist in the cache, which means this task is no"
+ f" longer relevant: the cache was dropped by an engine restart or expired after"
+ f" CACHE_FOR_ALERT_GROUP_LIFETIME"
+ )
+ if current_task_id != cached_task_id:
+ return (
+ f"cache_alert_group_for_web skipped, because the current task_id ({current_task_id})"
+ f" doesn't match the cached task_id ({cached_task_id}) for alert_group {alert_group_pk}"
+ )
+ else:
+ AlertGroup = apps.get_model("alerts", "AlertGroup")
+ alert_group = AlertGroup.all_objects.using_readonly_db.get(pk=alert_group_pk)
+ alert_group.cache_for_web(alert_group.channel.organization)
+ logger.info(f"cache_alert_group_for_web: cache refreshed for alert_group {alert_group_pk}")
diff --git a/engine/apps/api/tests/__init__.py b/engine/apps/api/tests/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/engine/apps/api/tests/conftest.py b/engine/apps/api/tests/conftest.py
new file mode 100644
index 0000000000..6fb534a190
--- /dev/null
+++ b/engine/apps/api/tests/conftest.py
@@ -0,0 +1,68 @@
+from datetime import timedelta
+
+import pytest
+from django.utils import timezone
+
+from apps.slack.scenarios.distribute_alerts import AlertShootingStep
+from apps.slack.slack_client import SlackClientWithErrorHandling
+
+
+@pytest.fixture()
+def mock_slack_api_call(monkeypatch):
+ def _mock_api_call(*args, **kwargs):
+ return {
+ "status": 200,
+ "user": {
+ "profile": {"image_512": "TEST_SLACK_IMAGE_URL"},
+ "name": "TEST_SLACK_LOGIN",
+ "real_name": "TEST_SLACK_NAME",
+ },
+ "team": {"name": "TEST_SLACK_TEAM_NAME"},
+ }
+
+ monkeypatch.setattr(SlackClientWithErrorHandling, "api_call", _mock_api_call)
+
+
+@pytest.fixture()
+def make_resolved_ack_new_silenced_alert_groups(make_alert_group, make_alert_receive_channel, make_alert):
+ def _make_alert_groups_all_statuses(alert_receive_channel, channel_filter, alert_raw_request_data, **kwargs):
+ resolved_alert_group = make_alert_group(
+ alert_receive_channel,
+ channel_filter=channel_filter,
+ acknowledged_at=timezone.now() + timedelta(hours=1),
+ resolved_at=timezone.now() + timedelta(hours=2),
+ resolved=True,
+ acknowledged=True,
+ )
+ make_alert(alert_group=resolved_alert_group, raw_request_data=alert_raw_request_data)
+
+ ack_alert_group = make_alert_group(
+ alert_receive_channel,
+ channel_filter=channel_filter,
+ acknowledged_at=timezone.now() + timedelta(hours=1),
+ acknowledged=True,
+ )
+ make_alert(alert_group=ack_alert_group, raw_request_data=alert_raw_request_data)
+
+ new_alert_group = make_alert_group(alert_receive_channel, channel_filter=channel_filter)
+ make_alert(alert_group=new_alert_group, raw_request_data=alert_raw_request_data)
+
+ silenced_alert_group = make_alert_group(
+ alert_receive_channel,
+ channel_filter=channel_filter,
+ silenced=True,
+ silenced_at=timezone.now() + timedelta(hours=1),
+ )
+ make_alert(alert_group=silenced_alert_group, raw_request_data=alert_raw_request_data)
+
+ return resolved_alert_group, ack_alert_group, new_alert_group, silenced_alert_group
+
+ return _make_alert_groups_all_statuses
+
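+# Typical usage in the tests below:
+#   resolved, acked, new, silenced = make_resolved_ack_new_silenced_alert_groups(
+#       alert_receive_channel, channel_filter, raw_request_data)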
+
+@pytest.fixture()
+def mock_alert_shooting_step_publish_slack_messages(monkeypatch):
+ def mock_publish_slack_messages(*args, **kwargs):
+ return None
+
+ monkeypatch.setattr(AlertShootingStep, "publish_slack_messages", mock_publish_slack_messages)
diff --git a/engine/apps/api/tests/test_alert_group.py b/engine/apps/api/tests/test_alert_group.py
new file mode 100644
index 0000000000..983a22bf73
--- /dev/null
+++ b/engine/apps/api/tests/test_alert_group.py
@@ -0,0 +1,1471 @@
+import datetime
+from unittest.mock import patch
+
+import pytest
+from django.urls import reverse
+from django.utils import timezone
+from rest_framework import status
+from rest_framework.response import Response
+from rest_framework.test import APIClient
+
+from apps.alerts.models import AlertGroup, AlertGroupLogRecord
+from common.constants.role import Role
+
+alert_raw_request_data = {
+ "evalMatches": [
+ {"value": 100, "metric": "High value", "tags": None},
+ {"value": 200, "metric": "Higher Value", "tags": None},
+ ],
+ "message": "Someone is testing the alert notification within grafana.",
+ "ruleId": 0,
+ "ruleName": "Test notification",
+ "ruleUrl": "http://localhost:3000/",
+ "state": "alerting",
+ "title": "[Alerting] Test notification",
+}
+
+
+@pytest.fixture()
+def alert_group_internal_api_setup(
+ make_organization_and_user_with_plugin_token,
+ make_alert_receive_channel,
+ make_channel_filter,
+ make_resolved_ack_new_silenced_alert_groups,
+):
+ organization, user, token = make_organization_and_user_with_plugin_token()
+ alert_receive_channel = make_alert_receive_channel(organization)
+ default_channel_filter = make_channel_filter(alert_receive_channel, is_default=True)
+ alert_groups = make_resolved_ack_new_silenced_alert_groups(
+ alert_receive_channel, default_channel_filter, alert_raw_request_data
+ )
+ return user, token, alert_groups
+
+
+@pytest.mark.django_db
+def test_get_filter_started_at(alert_group_internal_api_setup, make_user_auth_headers):
+ user, token, alert_groups = alert_group_internal_api_setup
+ client = APIClient()
+
+ url = reverse("api-internal:alertgroup-list")
+ response = client.get(
+ url + f"?started_at=1970-01-01T00:00:00/2099-01-01T23:59:59",
+ format="json",
+ **make_user_auth_headers(user, token),
+ )
+
+ assert response.status_code == status.HTTP_200_OK
+ assert response.data["count"] == 4
+
+
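+# started_at / resolved_at filters take an ISO 8601 datetime range written as
+# "<start>/<end>", as in the requests above and below.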
+@pytest.mark.django_db
+def test_get_filter_resolved_at_alertgroup_empty_result(alert_group_internal_api_setup, make_user_auth_headers):
+ client = APIClient()
+ user, token, alert_groups = alert_group_internal_api_setup
+
+ url = reverse("api-internal:alertgroup-list")
+ response = client.get(
+ url + "?resolved_at=1970-01-01T00:00:00/1970-01-01T23:59:59",
+ format="json",
+ **make_user_auth_headers(user, token),
+ )
+ assert response.status_code == status.HTTP_200_OK
+ assert response.data["count"] == 0
+
+
+@pytest.mark.django_db
+def test_get_filter_resolved_at_alertgroup_invalid_format(alert_group_internal_api_setup, make_user_auth_headers):
+ client = APIClient()
+ user, token, alert_groups = alert_group_internal_api_setup
+
+ url = reverse("api-internal:alertgroup-list")
+ response = client.get(
+ url + "?resolved_at=invalid_date_format", format="json", **make_user_auth_headers(user, token)
+ )
+ assert response.status_code == status.HTTP_400_BAD_REQUEST
+
+
+@pytest.mark.django_db
+def test_get_filter_resolved_at(alert_group_internal_api_setup, make_user_auth_headers):
+ client = APIClient()
+ user, token, alert_groups = alert_group_internal_api_setup
+
+ url = reverse("api-internal:alertgroup-list")
+ response = client.get(
+ url + "?resolved_at=1970-01-01T00:00:00/2099-01-01T23:59:59",
+ format="json",
+ **make_user_auth_headers(user, token),
+ )
+ assert response.status_code == status.HTTP_200_OK
+ assert response.data["count"] == 1
+
+
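+# The status query param below maps to AlertGroup's integer statuses:
+# 0 = new, 1 = acknowledged, 2 = resolved, 3 = silenced
+# (see test_alert_group_status_field at the end of this file).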
+@pytest.mark.django_db
+def test_status_new(alert_group_internal_api_setup, make_user_auth_headers):
+ client = APIClient()
+ user, token, alert_groups = alert_group_internal_api_setup
+ _, _, new_alert_group, _ = alert_groups
+
+ url = reverse("api-internal:alertgroup-list")
+ response = client.get(url + "?status=0", format="json", **make_user_auth_headers(user, token))
+ assert response.status_code == status.HTTP_200_OK
+ assert response.data["count"] == 1
+ assert response.data["results"][0]["pk"] == new_alert_group.public_primary_key
+
+
+@pytest.mark.django_db
+def test_status_ack(alert_group_internal_api_setup, make_user_auth_headers):
+ client = APIClient()
+ user, token, alert_groups = alert_group_internal_api_setup
+ _, ack_alert_group, _, _ = alert_groups
+
+ url = reverse("api-internal:alertgroup-list")
+ response = client.get(url + "?status=1", format="json", **make_user_auth_headers(user, token))
+ assert response.status_code == status.HTTP_200_OK
+ assert response.data["count"] == 1
+ assert response.data["results"][0]["pk"] == ack_alert_group.public_primary_key
+
+
+@pytest.mark.django_db
+def test_status_resolved(alert_group_internal_api_setup, make_user_auth_headers):
+ client = APIClient()
+ user, token, alert_groups = alert_group_internal_api_setup
+ resolved_alert_group, _, _, _ = alert_groups
+
+ url = reverse("api-internal:alertgroup-list")
+ response = client.get(url + "?status=2", format="json", **make_user_auth_headers(user, token))
+ assert response.status_code == status.HTTP_200_OK
+ assert response.data["count"] == 1
+ assert response.data["results"][0]["pk"] == resolved_alert_group.public_primary_key
+
+
+@pytest.mark.django_db
+def test_status_silenced(alert_group_internal_api_setup, make_user_auth_headers):
+ client = APIClient()
+ user, token, alert_groups = alert_group_internal_api_setup
+ _, _, _, silenced_alert_group = alert_groups
+
+ url = reverse("api-internal:alertgroup-list")
+ response = client.get(url + "?status=3", format="json", **make_user_auth_headers(user, token))
+ assert response.status_code == status.HTTP_200_OK
+ assert response.data["count"] == 1
+ assert response.data["results"][0]["pk"] == silenced_alert_group.public_primary_key
+
+
+@pytest.mark.django_db
+def test_all_statuses(alert_group_internal_api_setup, make_user_auth_headers):
+ client = APIClient()
+ user, token, alert_groups = alert_group_internal_api_setup
+ resolved_alert_group, _, _, _ = alert_groups
+
+ url = reverse("api-internal:alertgroup-list")
+ response = client.get(
+ url + "?status=0&status=1&&status=2&status=3", format="json", **make_user_auth_headers(user, token)
+ )
+ assert response.status_code == status.HTTP_200_OK
+ assert response.data["count"] == 4
+
+
+@pytest.mark.django_db
+def test_get_filter_resolved_by(
+ make_organization_and_user_with_plugin_token,
+ make_user_for_organization,
+ make_alert_receive_channel,
+ make_channel_filter,
+ make_alert_group,
+ make_alert,
+ make_resolved_ack_new_silenced_alert_groups,
+ make_user_auth_headers,
+):
+ client = APIClient()
+
+ organization, first_user, token = make_organization_and_user_with_plugin_token()
+ second_user = make_user_for_organization(organization)
+
+ alert_receive_channel = make_alert_receive_channel(organization)
+ default_channel_filter = make_channel_filter(alert_receive_channel, is_default=True)
+
+ resolved_alert_group = make_alert_group(
+ alert_receive_channel,
+ channel_filter=default_channel_filter,
+ acknowledged_at=timezone.now() + datetime.timedelta(hours=1),
+ resolved_at=timezone.now() + datetime.timedelta(hours=2),
+ resolved=True,
+ acknowledged=True,
+ resolved_by_user=first_user,
+ acknowledged_by_user=second_user,
+ )
+ make_alert(alert_group=resolved_alert_group, raw_request_data=alert_raw_request_data)
+
+ url = reverse("api-internal:alertgroup-list")
+
+ first_response = client.get(
+ url + f"?resolved_by={first_user.public_primary_key}",
+ format="json",
+ **make_user_auth_headers(first_user, token),
+ )
+ assert first_response.status_code == status.HTTP_200_OK
+ assert first_response.data["count"] == 1
+
+ second_response = client.get(
+ url + f"?resolved_by={second_user.public_primary_key}",
+ format="json",
+ **make_user_auth_headers(first_user, token),
+ )
+ assert second_response.status_code == status.HTTP_200_OK
+ assert second_response.data["count"] == 0
+
+
+@pytest.mark.django_db
+def test_get_filter_resolved_by_multiple_values(
+ make_organization_and_user_with_plugin_token,
+ make_user_for_organization,
+ make_alert_receive_channel,
+ make_channel_filter,
+ make_alert_group,
+ make_alert,
+ make_resolved_ack_new_silenced_alert_groups,
+ make_user_auth_headers,
+):
+ client = APIClient()
+
+ organization, first_user, token = make_organization_and_user_with_plugin_token()
+ second_user = make_user_for_organization(organization)
+ third_user = make_user_for_organization(organization)
+
+ alert_receive_channel = make_alert_receive_channel(organization)
+ default_channel_filter = make_channel_filter(alert_receive_channel, is_default=True)
+
+ def make_resolved_by_user_alert_group(user):
+ resolved_alert_group = make_alert_group(
+ alert_receive_channel,
+ channel_filter=default_channel_filter,
+ acknowledged_at=timezone.now() + datetime.timedelta(hours=1),
+ resolved_at=timezone.now() + datetime.timedelta(hours=2),
+ resolved=True,
+ acknowledged=True,
+ resolved_by_user=user,
+ acknowledged_by_user=user,
+ )
+ make_alert(alert_group=resolved_alert_group, raw_request_data=alert_raw_request_data)
+
+ make_resolved_by_user_alert_group(first_user)
+ make_resolved_by_user_alert_group(second_user)
+ make_resolved_by_user_alert_group(third_user)
+
+ url = reverse("api-internal:alertgroup-list")
+
+ first_response = client.get(
+ url + f"?resolved_by={first_user.public_primary_key}&" f"resolved_by={second_user.public_primary_key}",
+ format="json",
+ **make_user_auth_headers(first_user, token),
+ )
+ assert first_response.status_code == status.HTTP_200_OK
+ assert first_response.data["count"] == 2
+
+
+@pytest.mark.django_db
+def test_get_filter_acknowledged_by(
+ make_organization_and_user_with_plugin_token,
+ make_user_for_organization,
+ make_alert_receive_channel,
+ make_channel_filter,
+ make_alert_group,
+ make_alert,
+ make_resolved_ack_new_silenced_alert_groups,
+ make_user_auth_headers,
+):
+ client = APIClient()
+
+ organization, first_user, token = make_organization_and_user_with_plugin_token()
+ second_user = make_user_for_organization(organization)
+
+ alert_receive_channel = make_alert_receive_channel(organization)
+ default_channel_filter = make_channel_filter(alert_receive_channel, is_default=True)
+
+ acknowledged_alert_group = make_alert_group(
+ alert_receive_channel,
+ channel_filter=default_channel_filter,
+ acknowledged_at=timezone.now() + datetime.timedelta(hours=1),
+ resolved_at=timezone.now() + datetime.timedelta(hours=2),
+ acknowledged=True,
+ acknowledged_by_user=first_user,
+ )
+ make_alert(alert_group=acknowledged_alert_group, raw_request_data=alert_raw_request_data)
+
+ url = reverse("api-internal:alertgroup-list")
+
+ first_response = client.get(
+ url + f"?acknowledged_by={first_user.public_primary_key}",
+ format="json",
+ **make_user_auth_headers(first_user, token),
+ )
+ assert first_response.status_code == status.HTTP_200_OK
+ assert first_response.data["count"] == 1
+
+ second_response = client.get(
+ url + f"?acknowledged_by={second_user.public_primary_key}",
+ format="json",
+ **make_user_auth_headers(first_user, token),
+ )
+ assert second_response.status_code == status.HTTP_200_OK
+ assert second_response.data["count"] == 0
+
+
+@pytest.mark.django_db
+def test_get_filter_acknowledged_by_multiple_values(
+ make_organization_and_user_with_plugin_token,
+ make_user_for_organization,
+ make_alert_receive_channel,
+ make_channel_filter,
+ make_alert_group,
+ make_alert,
+ make_resolved_ack_new_silenced_alert_groups,
+ make_user_auth_headers,
+):
+ client = APIClient()
+
+ organization, first_user, token = make_organization_and_user_with_plugin_token()
+ second_user = make_user_for_organization(organization)
+ third_user = make_user_for_organization(organization)
+
+ alert_receive_channel = make_alert_receive_channel(organization)
+ default_channel_filter = make_channel_filter(alert_receive_channel, is_default=True)
+
+ def make_acknowledged_by_user_alert_group(user):
+ acknowledged_alert_group = make_alert_group(
+ alert_receive_channel,
+ channel_filter=default_channel_filter,
+ acknowledged_at=timezone.now() + datetime.timedelta(hours=1),
+ resolved_at=timezone.now() + datetime.timedelta(hours=2),
+ acknowledged=True,
+ acknowledged_by_user=user,
+ )
+ make_alert(alert_group=acknowledged_alert_group, raw_request_data=alert_raw_request_data)
+
+ make_acknowledged_by_user_alert_group(first_user)
+ make_acknowledged_by_user_alert_group(second_user)
+ make_acknowledged_by_user_alert_group(third_user)
+
+ url = reverse("api-internal:alertgroup-list")
+
+ first_response = client.get(
+ url + f"?acknowledged_by={first_user.public_primary_key}" f"&acknowledged_by={second_user.public_primary_key}",
+ format="json",
+ **make_user_auth_headers(first_user, token),
+ )
+ assert first_response.status_code == status.HTTP_200_OK
+ assert first_response.data["count"] == 2
+
+
+@pytest.mark.django_db
+def test_get_filter_silenced_by(
+ make_organization_and_user_with_plugin_token,
+ make_user_for_organization,
+ make_alert_receive_channel,
+ make_channel_filter,
+ make_alert_group,
+ make_alert,
+ make_resolved_ack_new_silenced_alert_groups,
+ make_user_auth_headers,
+):
+ client = APIClient()
+
+ organization, first_user, token = make_organization_and_user_with_plugin_token()
+ second_user = make_user_for_organization(organization)
+
+ alert_receive_channel = make_alert_receive_channel(organization)
+ default_channel_filter = make_channel_filter(alert_receive_channel, is_default=True)
+
+ silenced_alert_group = make_alert_group(
+ alert_receive_channel,
+ channel_filter=default_channel_filter,
+ silenced_at=timezone.now() + datetime.timedelta(hours=1),
+ silenced=True,
+ silenced_by_user=first_user,
+ )
+ make_alert(alert_group=silenced_alert_group, raw_request_data=alert_raw_request_data)
+
+ url = reverse("api-internal:alertgroup-list")
+
+ first_response = client.get(
+ url + f"?silenced_by={first_user.public_primary_key}",
+ format="json",
+ **make_user_auth_headers(first_user, token),
+ )
+ assert first_response.status_code == status.HTTP_200_OK
+ assert first_response.data["count"] == 1
+
+ second_response = client.get(
+ url + f"?silenced_by={second_user.public_primary_key}",
+ format="json",
+ **make_user_auth_headers(first_user, token),
+ )
+ assert second_response.status_code == status.HTTP_200_OK
+ assert second_response.data["count"] == 0
+
+
+@pytest.mark.django_db
+def test_get_filter_silenced_by_multiple_values(
+ make_organization_and_user_with_plugin_token,
+ make_user_for_organization,
+ make_alert_receive_channel,
+ make_channel_filter,
+ make_alert_group,
+ make_alert,
+ make_resolved_ack_new_silenced_alert_groups,
+ make_user_auth_headers,
+):
+ client = APIClient()
+
+ organization, first_user, token = make_organization_and_user_with_plugin_token()
+ second_user = make_user_for_organization(organization)
+ third_user = make_user_for_organization(organization)
+
+ alert_receive_channel = make_alert_receive_channel(organization)
+ default_channel_filter = make_channel_filter(alert_receive_channel, is_default=True)
+
+ def make_silenced_by_user_alert_group(user):
+ silenced_alert_group = make_alert_group(
+ alert_receive_channel,
+ channel_filter=default_channel_filter,
+ silenced_at=timezone.now() + datetime.timedelta(hours=1),
+ silenced=True,
+ silenced_by_user=user,
+ )
+ make_alert(alert_group=silenced_alert_group, raw_request_data=alert_raw_request_data)
+
+ make_silenced_by_user_alert_group(first_user)
+ make_silenced_by_user_alert_group(second_user)
+ make_silenced_by_user_alert_group(third_user)
+
+ url = reverse("api-internal:alertgroup-list")
+
+ first_response = client.get(
+ url + f"?silenced_by={first_user.public_primary_key}&silenced_by={second_user.public_primary_key}",
+ format="json",
+ **make_user_auth_headers(first_user, token),
+ )
+ assert first_response.status_code == status.HTTP_200_OK
+ assert first_response.data["count"] == 2
+
+
+@pytest.mark.django_db
+def test_get_filter_invitees_are(
+ make_organization_and_user_with_plugin_token,
+ make_user_for_organization,
+ make_alert_receive_channel,
+ make_channel_filter,
+ make_alert_group,
+ make_alert,
+ make_user_auth_headers,
+):
+ client = APIClient()
+
+ organization, first_user, token = make_organization_and_user_with_plugin_token()
+ second_user = make_user_for_organization(organization)
+
+ alert_receive_channel = make_alert_receive_channel(organization)
+ default_channel_filter = make_channel_filter(alert_receive_channel, is_default=True)
+
+ alert_group = make_alert_group(
+ alert_receive_channel,
+ channel_filter=default_channel_filter,
+ )
+ make_alert(alert_group=alert_group, raw_request_data={})
+ alert_group.log_records.create(
+ type=AlertGroupLogRecord.TYPE_ESCALATION_TRIGGERED,
+ author=first_user,
+ )
+
+ url = reverse("api-internal:alertgroup-list")
+
+ first_response = client.get(
+ url + f"?invitees_are={first_user.public_primary_key}",
+ format="json",
+ **make_user_auth_headers(first_user, token),
+ )
+ assert first_response.status_code == status.HTTP_200_OK
+ assert first_response.data["count"] == 1
+
+ second_response = client.get(
+ url + f"?invitees_are={second_user.public_primary_key}",
+ format="json",
+ **make_user_auth_headers(first_user, token),
+ )
+ assert second_response.status_code == status.HTTP_200_OK
+ assert second_response.data["count"] == 0
+
+
+@pytest.mark.django_db
+def test_get_filter_invitees_are_multiple_values(
+ make_organization_and_user_with_plugin_token,
+ make_user_for_organization,
+ make_alert_receive_channel,
+ make_channel_filter,
+ make_alert_group,
+ make_alert,
+ make_user_auth_headers,
+):
+ client = APIClient()
+
+ organization, first_user, token = make_organization_and_user_with_plugin_token()
+ second_user = make_user_for_organization(organization)
+ third_user = make_user_for_organization(organization)
+
+ alert_receive_channel = make_alert_receive_channel(organization)
+ default_channel_filter = make_channel_filter(alert_receive_channel, is_default=True)
+
+ def make_alert_group_with_invitee(user):
+ alert_group = make_alert_group(
+ alert_receive_channel,
+ channel_filter=default_channel_filter,
+ )
+ make_alert(alert_group=alert_group, raw_request_data={})
+
+ alert_group.log_records.create(
+ type=AlertGroupLogRecord.TYPE_ESCALATION_TRIGGERED,
+ author=user,
+ )
+
+ make_alert_group_with_invitee(first_user)
+ make_alert_group_with_invitee(second_user)
+ make_alert_group_with_invitee(third_user)
+
+ url = reverse("api-internal:alertgroup-list")
+
+ first_response = client.get(
+ url + f"?invitees_are={first_user.public_primary_key}" f"&invitees_are={second_user.public_primary_key}",
+ format="json",
+ **make_user_auth_headers(first_user, token),
+ )
+ assert first_response.status_code == status.HTTP_200_OK
+ assert first_response.data["count"] == 2
+
+
+@pytest.mark.django_db
+def test_get_filter_invitees_are_ag_with_multiple_logs(
+ make_organization_and_user_with_plugin_token,
+ make_user_for_organization,
+ make_alert_receive_channel,
+ make_channel_filter,
+ make_alert_group,
+ make_alert,
+ make_user_auth_headers,
+):
+ client = APIClient()
+
+ organization, first_user, token = make_organization_and_user_with_plugin_token()
+ second_user = make_user_for_organization(organization)
+
+ alert_receive_channel = make_alert_receive_channel(organization)
+ default_channel_filter = make_channel_filter(alert_receive_channel, is_default=True)
+
+ alert_group = make_alert_group(
+ alert_receive_channel,
+ channel_filter=default_channel_filter,
+ )
+ make_alert(alert_group=alert_group, raw_request_data={})
+
+ alert_group.log_records.create(
+ type=AlertGroupLogRecord.TYPE_ESCALATION_TRIGGERED,
+ author=first_user,
+ )
+
+ alert_group.log_records.create(
+ type=AlertGroupLogRecord.TYPE_ESCALATION_TRIGGERED,
+ author=second_user,
+ )
+
+ url = reverse("api-internal:alertgroup-list")
+
+ first_response = client.get(
+ url + f"?invitees_are={first_user.public_primary_key}" f"&invitees_are={second_user.public_primary_key}",
+ format="json",
+ **make_user_auth_headers(first_user, token),
+ )
+ assert first_response.status_code == status.HTTP_200_OK
+ assert first_response.data["count"] == 1
+
+
+@pytest.mark.django_db
+def test_get_filter_with_resolution_note(
+ alert_group_internal_api_setup,
+ make_resolution_note,
+ make_user_auth_headers,
+):
+ user, token, alert_groups = alert_group_internal_api_setup
+ res_alert_group, ack_alert_group, _, _ = alert_groups
+ client = APIClient()
+
+ url = reverse("api-internal:alertgroup-list")
+
+ # there are no alert groups with resolution_notes
+ response = client.get(url + "?with_resolution_note=true", format="json", **make_user_auth_headers(user, token))
+ assert response.status_code == status.HTTP_200_OK
+ assert response.data["count"] == 0
+
+ response = client.get(url + "?with_resolution_note=false", format="json", **make_user_auth_headers(user, token))
+ assert response.status_code == status.HTTP_200_OK
+ assert response.data["count"] == 4
+
+ # add resolution_notes to two of four alert groups
+ make_resolution_note(res_alert_group)
+ make_resolution_note(ack_alert_group)
+
+ response = client.get(url + "?with_resolution_note=true", format="json", **make_user_auth_headers(user, token))
+ assert response.status_code == status.HTTP_200_OK
+ assert response.data["count"] == 2
+
+ response = client.get(url + "?with_resolution_note=false", format="json", **make_user_auth_headers(user, token))
+ assert response.status_code == status.HTTP_200_OK
+ assert response.data["count"] == 2
+
+
+@pytest.mark.django_db
+def test_get_filter_with_resolution_note_after_delete_resolution_note(
+ alert_group_internal_api_setup,
+ make_resolution_note,
+ make_user_auth_headers,
+):
+ user, token, alert_groups = alert_group_internal_api_setup
+ res_alert_group, ack_alert_group, _, _ = alert_groups
+ client = APIClient()
+
+ url = reverse("api-internal:alertgroup-list")
+
+ # add resolution notes to two alert groups
+ resolution_note_res_alert_group = make_resolution_note(res_alert_group)
+ make_resolution_note(ack_alert_group)
+
+ # delete resolution note message using soft delete
+ resolution_note_res_alert_group.delete()
+ resolution_note_res_alert_group.refresh_from_db()
+ assert resolution_note_res_alert_group.deleted_at is not None
+
+ response = client.get(url + "?with_resolution_note=true", format="json", **make_user_auth_headers(user, token))
+ assert response.status_code == status.HTTP_200_OK
+ assert response.data["count"] == 1
+
+
+@pytest.mark.django_db
+@pytest.mark.parametrize(
+ "role,expected_status",
+ [
+ (Role.ADMIN, status.HTTP_200_OK),
+ (Role.EDITOR, status.HTTP_200_OK),
+ (Role.VIEWER, status.HTTP_403_FORBIDDEN),
+ ],
+)
+def test_alert_group_acknowledge_permissions(
+ make_user_for_organization, alert_group_internal_api_setup, make_user_auth_headers, role, expected_status
+):
+ client = APIClient()
+ _, token, alert_groups = alert_group_internal_api_setup
+ _, _, new_alert_group, _ = alert_groups
+ organization = new_alert_group.channel.organization
+ user = make_user_for_organization(organization, role)
+ url = reverse("api-internal:alertgroup-acknowledge", kwargs={"pk": new_alert_group.public_primary_key})
+
+ with patch(
+ "apps.api.views.alert_group.AlertGroupView.acknowledge",
+ return_value=Response(
+ status=status.HTTP_200_OK,
+ ),
+ ):
+ response = client.post(url, format="json", **make_user_auth_headers(user, token))
+ assert response.status_code == expected_status
+
+
+@pytest.mark.django_db
+@pytest.mark.parametrize(
+ "role,expected_status",
+ [
+ (Role.ADMIN, status.HTTP_200_OK),
+ (Role.EDITOR, status.HTTP_200_OK),
+ (Role.VIEWER, status.HTTP_403_FORBIDDEN),
+ ],
+)
+def test_alert_group_unacknowledge_permissions(
+ make_user_for_organization, alert_group_internal_api_setup, make_user_auth_headers, role, expected_status
+):
+ client = APIClient()
+ _, token, alert_groups = alert_group_internal_api_setup
+ _, _, new_alert_group, _ = alert_groups
+ organization = new_alert_group.channel.organization
+ user = make_user_for_organization(organization, role)
+ url = reverse("api-internal:alertgroup-unacknowledge", kwargs={"pk": new_alert_group.public_primary_key})
+
+ with patch(
+ "apps.api.views.alert_group.AlertGroupView.unacknowledge",
+ return_value=Response(
+ status=status.HTTP_200_OK,
+ ),
+ ):
+ response = client.post(url, format="json", **make_user_auth_headers(user, token))
+ assert response.status_code == expected_status
+
+
+@pytest.mark.django_db
+@pytest.mark.parametrize(
+ "role,expected_status",
+ [
+ (Role.ADMIN, status.HTTP_200_OK),
+ (Role.EDITOR, status.HTTP_200_OK),
+ (Role.VIEWER, status.HTTP_403_FORBIDDEN),
+ ],
+)
+def test_alert_group_resolve_permissions(
+ make_user_for_organization, alert_group_internal_api_setup, make_user_auth_headers, role, expected_status
+):
+ client = APIClient()
+ _, token, alert_groups = alert_group_internal_api_setup
+ _, _, new_alert_group, _ = alert_groups
+ organization = new_alert_group.channel.organization
+ user = make_user_for_organization(organization, role)
+ url = reverse("api-internal:alertgroup-resolve", kwargs={"pk": new_alert_group.public_primary_key})
+
+ with patch(
+ "apps.api.views.alert_group.AlertGroupView.resolve",
+ return_value=Response(
+ status=status.HTTP_200_OK,
+ ),
+ ):
+ response = client.post(url, format="json", **make_user_auth_headers(user, token))
+ assert response.status_code == expected_status
+
+
+@pytest.mark.django_db
+@pytest.mark.parametrize(
+ "role,expected_status",
+ [
+ (Role.ADMIN, status.HTTP_200_OK),
+ (Role.EDITOR, status.HTTP_200_OK),
+ (Role.VIEWER, status.HTTP_403_FORBIDDEN),
+ ],
+)
+def test_alert_group_unresolve_permissions(
+ make_user_for_organization, alert_group_internal_api_setup, make_user_auth_headers, role, expected_status
+):
+ client = APIClient()
+ _, token, alert_groups = alert_group_internal_api_setup
+ _, _, new_alert_group, _ = alert_groups
+ organization = new_alert_group.channel.organization
+ user = make_user_for_organization(organization, role)
+ url = reverse("api-internal:alertgroup-unresolve", kwargs={"pk": new_alert_group.public_primary_key})
+
+ with patch(
+ "apps.api.views.alert_group.AlertGroupView.unresolve",
+ return_value=Response(
+ status=status.HTTP_200_OK,
+ ),
+ ):
+ response = client.post(url, format="json", **make_user_auth_headers(user, token))
+ assert response.status_code == expected_status
+
+
+@pytest.mark.django_db
+@pytest.mark.parametrize(
+ "role,expected_status",
+ [
+ (Role.ADMIN, status.HTTP_200_OK),
+ (Role.EDITOR, status.HTTP_200_OK),
+ (Role.VIEWER, status.HTTP_403_FORBIDDEN),
+ ],
+)
+def test_alert_group_silence_permissions(
+ make_user_for_organization, alert_group_internal_api_setup, make_user_auth_headers, role, expected_status
+):
+ client = APIClient()
+ _, token, alert_groups = alert_group_internal_api_setup
+ _, _, new_alert_group, _ = alert_groups
+ organization = new_alert_group.channel.organization
+ user = make_user_for_organization(organization, role)
+ url = reverse("api-internal:alertgroup-silence", kwargs={"pk": new_alert_group.public_primary_key})
+
+ with patch(
+ "apps.api.views.alert_group.AlertGroupView.silence",
+ return_value=Response(
+ status=status.HTTP_200_OK,
+ ),
+ ):
+ response = client.post(url, format="json", **make_user_auth_headers(user, token))
+ assert response.status_code == expected_status
+
+
+@pytest.mark.django_db
+@pytest.mark.parametrize(
+ "role,expected_status",
+ [
+ (Role.ADMIN, status.HTTP_200_OK),
+ (Role.EDITOR, status.HTTP_200_OK),
+ (Role.VIEWER, status.HTTP_403_FORBIDDEN),
+ ],
+)
+def test_alert_group_unsilence_permissions(
+ make_user_for_organization, alert_group_internal_api_setup, make_user_auth_headers, role, expected_status
+):
+ client = APIClient()
+ _, token, alert_groups = alert_group_internal_api_setup
+ _, _, new_alert_group, _ = alert_groups
+ organization = new_alert_group.channel.organization
+ user = make_user_for_organization(organization, role)
+ url = reverse("api-internal:alertgroup-unsilence", kwargs={"pk": new_alert_group.public_primary_key})
+
+ with patch(
+ "apps.api.views.alert_group.AlertGroupView.unsilence",
+ return_value=Response(
+ status=status.HTTP_200_OK,
+ ),
+ ):
+ response = client.post(url, format="json", **make_user_auth_headers(user, token))
+ assert response.status_code == expected_status
+
+
+@pytest.mark.django_db
+@pytest.mark.parametrize(
+ "role,expected_status",
+ [
+ (Role.ADMIN, status.HTTP_200_OK),
+ (Role.EDITOR, status.HTTP_200_OK),
+ (Role.VIEWER, status.HTTP_403_FORBIDDEN),
+ ],
+)
+def test_alert_group_attach_permissions(
+ make_user_for_organization, alert_group_internal_api_setup, make_user_auth_headers, role, expected_status
+):
+ client = APIClient()
+ _, token, alert_groups = alert_group_internal_api_setup
+ _, _, new_alert_group, _ = alert_groups
+ organization = new_alert_group.channel.organization
+ user = make_user_for_organization(organization, role)
+ url = reverse("api-internal:alertgroup-attach", kwargs={"pk": new_alert_group.public_primary_key})
+
+ with patch(
+ "apps.api.views.alert_group.AlertGroupView.attach",
+ return_value=Response(
+ status=status.HTTP_200_OK,
+ ),
+ ):
+ response = client.post(url, format="json", **make_user_auth_headers(user, token))
+ assert response.status_code == expected_status
+
+
+@pytest.mark.django_db
+@pytest.mark.parametrize(
+ "role,expected_status",
+ [
+ (Role.ADMIN, status.HTTP_200_OK),
+ (Role.EDITOR, status.HTTP_200_OK),
+ (Role.VIEWER, status.HTTP_403_FORBIDDEN),
+ ],
+)
+def test_alert_group_unattach_permissions(
+ make_user_for_organization, alert_group_internal_api_setup, make_user_auth_headers, role, expected_status
+):
+ client = APIClient()
+ _, token, alert_groups = alert_group_internal_api_setup
+ _, _, new_alert_group, _ = alert_groups
+ organization = new_alert_group.channel.organization
+ user = make_user_for_organization(organization, role)
+ url = reverse("api-internal:alertgroup-unattach", kwargs={"pk": new_alert_group.public_primary_key})
+
+ with patch(
+ "apps.api.views.alert_group.AlertGroupView.unattach",
+ return_value=Response(
+ status=status.HTTP_200_OK,
+ ),
+ ):
+ response = client.post(url, format="json", **make_user_auth_headers(user, token))
+ assert response.status_code == expected_status
+
+
+@pytest.mark.django_db
+@pytest.mark.parametrize(
+ "role,expected_status",
+ [
+ (Role.ADMIN, status.HTTP_200_OK),
+ (Role.EDITOR, status.HTTP_200_OK),
+ (Role.VIEWER, status.HTTP_200_OK),
+ ],
+)
+def test_alert_group_list_permissions(
+ make_user_for_organization, alert_group_internal_api_setup, make_user_auth_headers, role, expected_status
+):
+ client = APIClient()
+ _, token, alert_groups = alert_group_internal_api_setup
+ _, _, new_alert_group, _ = alert_groups
+ organization = new_alert_group.channel.organization
+ user = make_user_for_organization(organization, role)
+ url = reverse("api-internal:alertgroup-list")
+
+ with patch(
+ "apps.api.views.alert_group.AlertGroupView.list",
+ return_value=Response(
+ status=status.HTTP_200_OK,
+ ),
+ ):
+ response = client.get(url, format="json", **make_user_auth_headers(user, token))
+ assert response.status_code == expected_status
+
+
+@pytest.mark.django_db
+@pytest.mark.parametrize(
+ "role,expected_status",
+ [
+ (Role.ADMIN, status.HTTP_200_OK),
+ (Role.EDITOR, status.HTTP_200_OK),
+ (Role.VIEWER, status.HTTP_200_OK),
+ ],
+)
+def test_alert_group_stats_permissions(
+ make_user_for_organization, alert_group_internal_api_setup, make_user_auth_headers, role, expected_status
+):
+ client = APIClient()
+ _, token, alert_groups = alert_group_internal_api_setup
+ _, _, new_alert_group, _ = alert_groups
+ organization = new_alert_group.channel.organization
+ user = make_user_for_organization(organization, role)
+ url = reverse("api-internal:alertgroup-stats")
+
+ with patch(
+ "apps.api.views.alert_group.AlertGroupView.stats",
+ return_value=Response(
+ status=status.HTTP_200_OK,
+ ),
+ ):
+ response = client.get(url, format="json", **make_user_auth_headers(user, token))
+ assert response.status_code == expected_status
+
+
+@pytest.mark.django_db
+@pytest.mark.parametrize(
+ "role,expected_status",
+ [
+ (Role.ADMIN, status.HTTP_200_OK),
+ (Role.EDITOR, status.HTTP_200_OK),
+ (Role.VIEWER, status.HTTP_403_FORBIDDEN),
+ ],
+)
+def test_alert_group_bulk_action_permissions(
+ make_user_for_organization, alert_group_internal_api_setup, make_user_auth_headers, role, expected_status
+):
+ client = APIClient()
+ _, token, alert_groups = alert_group_internal_api_setup
+ _, _, new_alert_group, _ = alert_groups
+ organization = new_alert_group.channel.organization
+ user = make_user_for_organization(organization, role)
+ url = reverse("api-internal:alertgroup-bulk-action")
+
+ with patch(
+ "apps.api.views.alert_group.AlertGroupView.bulk_action", return_value=Response(status=status.HTTP_200_OK)
+ ):
+ response = client.post(url, format="json", **make_user_auth_headers(user, token))
+
+ assert response.status_code == expected_status
+
+
+@pytest.mark.django_db
+@pytest.mark.parametrize(
+ "role,expected_status",
+ [
+ (Role.ADMIN, status.HTTP_200_OK),
+ (Role.EDITOR, status.HTTP_200_OK),
+ (Role.VIEWER, status.HTTP_200_OK),
+ ],
+)
+def test_alert_group_filters_permissions(
+ make_user_for_organization, alert_group_internal_api_setup, make_user_auth_headers, role, expected_status
+):
+ client = APIClient()
+ _, token, alert_groups = alert_group_internal_api_setup
+ _, _, new_alert_group, _ = alert_groups
+ organization = new_alert_group.channel.organization
+ user = make_user_for_organization(organization, role)
+ url = reverse("api-internal:alertgroup-filters")
+
+ with patch(
+ "apps.api.views.alert_group.AlertGroupView.filters",
+ return_value=Response(
+ status=status.HTTP_200_OK,
+ ),
+ ):
+ response = client.get(url, format="json", **make_user_auth_headers(user, token))
+ assert response.status_code == expected_status
+
+
+@pytest.mark.django_db
+@pytest.mark.parametrize(
+ "role,expected_status",
+ [
+ (Role.ADMIN, status.HTTP_200_OK),
+ (Role.EDITOR, status.HTTP_200_OK),
+ (Role.VIEWER, status.HTTP_200_OK),
+ ],
+)
+def test_alert_group_detail_permissions(
+ make_user_for_organization, alert_group_internal_api_setup, make_user_auth_headers, role, expected_status
+):
+ client = APIClient()
+ _, token, alert_groups = alert_group_internal_api_setup
+ _, _, new_alert_group, _ = alert_groups
+ organization = new_alert_group.channel.organization
+ user = make_user_for_organization(organization, role)
+ url = reverse("api-internal:alertgroup-detail", kwargs={"pk": new_alert_group.public_primary_key})
+
+ with patch(
+ "apps.api.views.alert_group.AlertGroupView.retrieve",
+ return_value=Response(
+ status=status.HTTP_200_OK,
+ ),
+ ):
+ response = client.get(url, format="json", **make_user_auth_headers(user, token))
+ assert response.status_code == expected_status
+
+
+@pytest.mark.django_db
+def test_silence(
+ alert_group_internal_api_setup,
+ make_user_auth_headers,
+):
+ client = APIClient()
+ user, token, alert_groups = alert_group_internal_api_setup
+ _, _, new_alert_group, _ = alert_groups
+ url = reverse("api-internal:alertgroup-silence", kwargs={"pk": new_alert_group.public_primary_key})
+
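+ # "delay" is passed in seconds; the alert group stays silenced until it elapses.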
+ silence_delay = timezone.timedelta(seconds=60)
+ response = client.post(
+ url, data={"delay": silence_delay.seconds}, format="json", **make_user_auth_headers(user, token)
+ )
+
+ assert response.status_code == status.HTTP_200_OK
+
+ new_alert_group.refresh_from_db()
+ assert new_alert_group.silenced_until is not None
+
+ assert new_alert_group.log_records.filter(
+ type=AlertGroupLogRecord.TYPE_SILENCE, author=user, silence_delay=silence_delay
+ ).exists()
+
+
+@pytest.mark.django_db
+def test_unsilence(
+ alert_group_internal_api_setup,
+ make_user_auth_headers,
+):
+ client = APIClient()
+ user, token, alert_groups = alert_group_internal_api_setup
+ _, _, new_alert_group, _ = alert_groups
+
+ silence_url = reverse("api-internal:alertgroup-silence", kwargs={"pk": new_alert_group.public_primary_key})
+ unsilence_url = reverse("api-internal:alertgroup-unsilence", kwargs={"pk": new_alert_group.public_primary_key})
+
+ # silence alert group
+ silence_delay = timezone.timedelta(seconds=10000)
+ client.post(
+ silence_url, data={"delay": silence_delay.seconds}, format="json", **make_user_auth_headers(user, token)
+ )
+
+ # unsilence alert group
+ response = client.post(unsilence_url, **make_user_auth_headers(user, token))
+
+ assert response.status_code == status.HTTP_200_OK
+
+ new_alert_group.refresh_from_db()
+ assert new_alert_group.silenced_until is None
+
+ assert new_alert_group.log_records.filter(
+ type=AlertGroupLogRecord.TYPE_UN_SILENCE,
+ author=user,
+ ).exists()
+
+
+@pytest.mark.django_db
+def test_invalid_bulk_action(
+ make_user_auth_headers,
+ alert_group_internal_api_setup,
+):
+ client = APIClient()
+ user, token, alert_groups = alert_group_internal_api_setup
+ _, _, new_alert_group, _ = alert_groups
+
+ url = reverse("api-internal:alertgroup-bulk-action")
+
+ response = client.post(
+ url,
+ data={
+ "alert_group_pks": [alert_group.public_primary_key for alert_group in alert_groups],
+ "action": "invalid_action",
+ },
+ format="json",
+ **make_user_auth_headers(user, token),
+ )
+
+ assert response.status_code == status.HTTP_400_BAD_REQUEST
+
+
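+# The tests below exercise the valid bulk actions (AlertGroup.RESTART, ACKNOWLEDGE,
+# RESOLVE and SILENCE, the latter also taking a "delay" in seconds) using the same
+# payload shape as above.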
+@patch("apps.alerts.tasks.send_alert_group_signal.apply_async", return_value=None)
+@patch("apps.alerts.tasks.send_update_log_report_signal.apply_async", return_value=None)
+@patch("apps.alerts.models.AlertGroup.start_escalation_if_needed", return_value=None)
+@pytest.mark.django_db
+def test_bulk_action_restart(
+ mocked_start_escalate_alert,
+ mocked_log_report_signal_task,
+ mocked_alert_group_signal_task,
+ make_user_auth_headers,
+ alert_group_internal_api_setup,
+):
+ client = APIClient()
+ user, token, alert_groups = alert_group_internal_api_setup
+ resolved_alert_group, acked_alert_group, new_alert_group, silenced_alert_group = alert_groups
+
+ url = reverse("api-internal:alertgroup-bulk-action")
+
+ assert not resolved_alert_group.log_records.filter(
+ type=AlertGroupLogRecord.TYPE_UN_RESOLVED,
+ author=user,
+ ).exists()
+
+ assert not acked_alert_group.log_records.filter(
+ type=AlertGroupLogRecord.TYPE_UN_ACK,
+ author=user,
+ ).exists()
+
+ assert not silenced_alert_group.log_records.filter(
+ type=AlertGroupLogRecord.TYPE_UN_SILENCE,
+ author=user,
+ ).exists()
+
+ # restart alert groups
+ response = client.post(
+ url,
+ data={
+ "alert_group_pks": [alert_group.public_primary_key for alert_group in alert_groups],
+ "action": AlertGroup.RESTART,
+ },
+ format="json",
+ **make_user_auth_headers(user, token),
+ )
+
+ assert response.status_code == status.HTTP_200_OK
+
+ assert resolved_alert_group.log_records.filter(
+ type=AlertGroupLogRecord.TYPE_UN_RESOLVED,
+ author=user,
+ ).exists()
+
+ assert acked_alert_group.log_records.filter(
+ type=AlertGroupLogRecord.TYPE_UN_ACK,
+ author=user,
+ ).exists()
+
+ assert silenced_alert_group.log_records.filter(
+ type=AlertGroupLogRecord.TYPE_UN_SILENCE,
+ author=user,
+ ).exists()
+
+ assert mocked_alert_group_signal_task.called
+ assert mocked_log_report_signal_task.called
+ assert mocked_start_escalate_alert.called
+
+
+@patch("apps.alerts.tasks.send_alert_group_signal.apply_async", return_value=None)
+@patch("apps.alerts.tasks.send_update_log_report_signal.apply_async", return_value=None)
+@pytest.mark.django_db
+def test_bulk_action_acknowledge(
+ mocked_log_report_signal_task,
+ mocked_alert_group_signal_task,
+ make_user_auth_headers,
+ alert_group_internal_api_setup,
+):
+ client = APIClient()
+ user, token, alert_groups = alert_group_internal_api_setup
+ resolved_alert_group, acked_alert_group, new_alert_group, _ = alert_groups
+
+ url = reverse("api-internal:alertgroup-bulk-action")
+
+ assert not new_alert_group.log_records.filter(
+ type=AlertGroupLogRecord.TYPE_UN_RESOLVED,
+ author=user,
+ ).exists()
+
+ # acknowledge alert groups
+ response = client.post(
+ url,
+ data={
+ "alert_group_pks": [alert_group.public_primary_key for alert_group in alert_groups],
+ "action": AlertGroup.ACKNOWLEDGE,
+ },
+ format="json",
+ **make_user_auth_headers(user, token),
+ )
+
+ assert response.status_code == status.HTTP_200_OK
+
+ assert new_alert_group.log_records.filter(
+ type=AlertGroupLogRecord.TYPE_ACK,
+ author=user,
+ ).exists()
+
+ assert resolved_alert_group.log_records.filter(
+ type=AlertGroupLogRecord.TYPE_UN_RESOLVED,
+ author=user,
+ ).exists()
+
+ assert resolved_alert_group.log_records.filter(
+ type=AlertGroupLogRecord.TYPE_ACK,
+ author=user,
+ ).exists()
+
+ assert not acked_alert_group.log_records.filter(
+ type=AlertGroupLogRecord.TYPE_ACK,
+ author=user,
+ ).exists()
+
+ assert mocked_alert_group_signal_task.called
+ assert mocked_log_report_signal_task.called
+
+
+@patch("apps.alerts.tasks.send_alert_group_signal.apply_async", return_value=None)
+@patch("apps.alerts.tasks.send_update_log_report_signal.apply_async", return_value=None)
+@pytest.mark.django_db
+def test_bulk_action_resolve(
+ mocked_log_report_signal_task,
+ mocked_alert_group_signal_task,
+ make_user_auth_headers,
+ alert_group_internal_api_setup,
+):
+ client = APIClient()
+ user, token, alert_groups = alert_group_internal_api_setup
+ resolved_alert_group, acked_alert_group, new_alert_group, _ = alert_groups
+
+ url = reverse("api-internal:alertgroup-bulk-action")
+
+ assert not new_alert_group.log_records.filter(
+ type=AlertGroupLogRecord.TYPE_RESOLVED,
+ author=user,
+ ).exists()
+
+ # resolve alert groups
+ response = client.post(
+ url,
+ data={
+ "alert_group_pks": [alert_group.public_primary_key for alert_group in alert_groups],
+ "action": AlertGroup.RESOLVE,
+ },
+ format="json",
+ **make_user_auth_headers(user, token),
+ )
+
+ assert response.status_code == status.HTTP_200_OK
+
+ assert new_alert_group.log_records.filter(
+ type=AlertGroupLogRecord.TYPE_RESOLVED,
+ author=user,
+ ).exists()
+
+ assert acked_alert_group.log_records.filter(
+ type=AlertGroupLogRecord.TYPE_RESOLVED,
+ author=user,
+ ).exists()
+
+ assert not resolved_alert_group.log_records.filter(
+ type=AlertGroupLogRecord.TYPE_RESOLVED,
+ author=user,
+ ).exists()
+
+ assert mocked_alert_group_signal_task.called
+ assert mocked_log_report_signal_task.called
+
+
+@patch("apps.alerts.tasks.send_alert_group_signal.apply_async", return_value=None)
+@patch("apps.alerts.tasks.send_update_log_report_signal.apply_async", return_value=None)
+@patch("apps.alerts.models.AlertGroup.start_unsilence_task", return_value=None)
+@pytest.mark.django_db
+def test_bulk_action_silence(
+ mocked_start_unsilence_task,
+ mocked_log_report_signal_task,
+ mocked_alert_group_signal_task,
+ make_user_auth_headers,
+ alert_group_internal_api_setup,
+):
+ client = APIClient()
+ user, token, alert_groups = alert_group_internal_api_setup
+ resolved_alert_group, acked_alert_group, new_alert_group, silenced_alert_group = alert_groups
+
+ url = reverse("api-internal:alertgroup-bulk-action")
+
+ assert not new_alert_group.log_records.filter(
+ type=AlertGroupLogRecord.TYPE_SILENCE,
+ author=user,
+ ).exists()
+
+ # silence alert groups
+ response = client.post(
+ url,
+ data={
+ "alert_group_pks": [alert_group.public_primary_key for alert_group in alert_groups],
+ "action": AlertGroup.SILENCE,
+ "delay": 180,
+ },
+ format="json",
+ **make_user_auth_headers(user, token),
+ )
+
+ assert response.status_code == status.HTTP_200_OK
+
+ assert new_alert_group.log_records.filter(
+ type=AlertGroupLogRecord.TYPE_SILENCE,
+ author=user,
+ ).exists()
+
+ new_alert_group.refresh_from_db()
+ assert new_alert_group.silenced
+
+ assert acked_alert_group.log_records.filter(
+ type=AlertGroupLogRecord.TYPE_UN_ACK,
+ author=user,
+ ).exists()
+
+ assert acked_alert_group.log_records.filter(
+ type=AlertGroupLogRecord.TYPE_SILENCE,
+ author=user,
+ ).exists()
+
+ assert resolved_alert_group.log_records.filter(
+ type=AlertGroupLogRecord.TYPE_UN_RESOLVED,
+ author=user,
+ ).exists()
+
+ assert resolved_alert_group.log_records.filter(
+ type=AlertGroupLogRecord.TYPE_SILENCE,
+ author=user,
+ ).exists()
+
+ assert silenced_alert_group.log_records.filter(
+ type=AlertGroupLogRecord.TYPE_UN_SILENCE,
+ author=user,
+ ).exists()
+
+ assert silenced_alert_group.log_records.filter(
+ type=AlertGroupLogRecord.TYPE_SILENCE,
+ author=user,
+ ).exists()
+
+ assert mocked_alert_group_signal_task.called
+ assert mocked_log_report_signal_task.called
+ assert mocked_start_unsilence_task.called
+
+
+@pytest.mark.django_db
+def test_alert_group_status_field(
+ make_user_auth_headers,
+ alert_group_internal_api_setup,
+):
+ client = APIClient()
+
+ user, token, alert_groups = alert_group_internal_api_setup
+ resolved_alert_group, acked_alert_group, new_alert_group, silenced_alert_group = alert_groups
+
+ url = reverse("api-internal:alertgroup-detail", kwargs={"pk": new_alert_group.public_primary_key})
+ response = client.get(url, format="json", **make_user_auth_headers(user, token))
+ assert response.json()["status"] == AlertGroup.NEW
+
+ url = reverse("api-internal:alertgroup-detail", kwargs={"pk": acked_alert_group.public_primary_key})
+ response = client.get(url, format="json", **make_user_auth_headers(user, token))
+ assert response.json()["status"] == AlertGroup.ACKNOWLEDGED
+
+ url = reverse("api-internal:alertgroup-detail", kwargs={"pk": resolved_alert_group.public_primary_key})
+ response = client.get(url, format="json", **make_user_auth_headers(user, token))
+ assert response.json()["status"] == AlertGroup.RESOLVED
+
+ url = reverse("api-internal:alertgroup-detail", kwargs={"pk": silenced_alert_group.public_primary_key})
+ response = client.get(url, format="json", **make_user_auth_headers(user, token))
+ assert response.json()["status"] == AlertGroup.SILENCED
+
+
+@pytest.mark.django_db
+@pytest.mark.parametrize(
+ "role,expected_status",
+ [
+ (Role.ADMIN, status.HTTP_200_OK),
+ (Role.EDITOR, status.HTTP_200_OK),
+ (Role.VIEWER, status.HTTP_403_FORBIDDEN),
+ ],
+)
+def test_alert_group_preview_template_permissions(
+ make_organization_and_user_with_plugin_token,
+ make_alert_receive_channel,
+ make_user_auth_headers,
+ role,
+ expected_status,
+ make_alert_group,
+ make_alert,
+):
+ organization, user, token = make_organization_and_user_with_plugin_token(role=role)
+ alert_receive_channel = make_alert_receive_channel(organization)
+ alert_group = make_alert_group(alert_receive_channel)
+ make_alert(alert_group=alert_group, raw_request_data=alert_receive_channel.config.example_payload)
+ client = APIClient()
+ url = reverse("api-internal:alertgroup-preview-template", kwargs={"pk": alert_group.public_primary_key})
+
+ with patch(
+ "apps.api.views.alert_group.AlertGroupView.preview_template",
+ return_value=Response(
+ status=status.HTTP_200_OK,
+ ),
+ ):
+ response = client.post(url, format="json", **make_user_auth_headers(user, token))
+
+ assert response.status_code == expected_status
+
+
+@pytest.mark.django_db
+def test_alert_group_preview_body_non_existent_template_var(
+ make_organization_and_user_with_plugin_token,
+ make_alert_receive_channel,
+ make_user_auth_headers,
+ make_alert_group,
+ make_alert,
+):
+ organization, user, token = make_organization_and_user_with_plugin_token(role=Role.ADMIN)
+ alert_receive_channel = make_alert_receive_channel(organization)
+ alert_group = make_alert_group(alert_receive_channel)
+ make_alert(alert_group=alert_group, raw_request_data=alert_receive_channel.config.example_payload)
+ client = APIClient()
+ url = reverse("api-internal:alertgroup-preview-template", kwargs={"pk": alert_group.public_primary_key})
+
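+ # referencing an undefined variable is not a template error: the endpoint
+ # still returns 200, with a None preview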
+ data = {"template_name": "email_title_template", "template_body": "foobar: {{ foobar.does_not_exist }}"}
+ response = client.post(url, data, format="json", **make_user_auth_headers(user, token))
+
+ assert response.status_code == status.HTTP_200_OK
+ assert response.json()["preview"] is None
+
+
+@pytest.mark.django_db
+def test_alert_group_preview_body_invalid_template_syntax(
+ make_organization_and_user_with_plugin_token,
+ make_alert_receive_channel,
+ make_user_auth_headers,
+ make_alert_group,
+ make_alert,
+):
+ organization, user, token = make_organization_and_user_with_plugin_token(role=Role.ADMIN)
+ alert_receive_channel = make_alert_receive_channel(organization)
+ alert_group = make_alert_group(alert_receive_channel)
+ make_alert(alert_group=alert_group, raw_request_data=alert_receive_channel.config.example_payload)
+ client = APIClient()
+ url = reverse("api-internal:alertgroup-preview-template", kwargs={"pk": alert_group.public_primary_key})
+
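+ # "foo is None" is rejected by Jinja2 (the built-in test is lowercase "none"),
+ # so the template fails to compile and the endpoint returns 400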
+ data = {"template_name": "email_title_template", "template_body": "{{'' if foo is None else foo}}"}
+ response = client.post(url, data, format="json", **make_user_auth_headers(user, token))
+
+ assert response.status_code == status.HTTP_400_BAD_REQUEST
diff --git a/engine/apps/api/tests/test_alert_receive_channel.py b/engine/apps/api/tests/test_alert_receive_channel.py
new file mode 100644
index 0000000000..f849cc5f48
--- /dev/null
+++ b/engine/apps/api/tests/test_alert_receive_channel.py
@@ -0,0 +1,662 @@
+import json
+from unittest.mock import patch
+
+import pytest
+from django.urls import reverse
+from rest_framework import status
+from rest_framework.response import Response
+from rest_framework.test import APIClient
+
+from apps.alerts.models import AlertReceiveChannel, EscalationPolicy
+from common.constants.role import Role
+
+
+@pytest.fixture()
+def alert_receive_channel_internal_api_setup(
+ make_organization_and_user_with_plugin_token,
+ make_alert_receive_channel,
+ make_escalation_chain,
+):
+ organization, user, token = make_organization_and_user_with_plugin_token()
+ alert_receive_channel = make_alert_receive_channel(organization)
+ make_escalation_chain(organization)
+ return user, token, alert_receive_channel
+
+
+@pytest.mark.django_db
+def test_get_alert_receive_channel(alert_receive_channel_internal_api_setup, make_user_auth_headers):
+ user, token, _ = alert_receive_channel_internal_api_setup
+ client = APIClient()
+
+ url = reverse("api-internal:alert_receive_channel-list")
+ response = client.get(url, format="json", **make_user_auth_headers(user, token))
+ assert response.status_code == status.HTTP_200_OK
+
+
+@pytest.mark.django_db
+def test_heartbeat_data_absence_alert_receive_channel(alert_receive_channel_internal_api_setup, make_user_auth_headers):
+ """
+ We get AlertReceiveChannel and there is no related heartbeat model object.
+ """
+ user, token, alert_receive_channel = alert_receive_channel_internal_api_setup
+ client = APIClient()
+
+ url = reverse("api-internal:alert_receive_channel-detail", kwargs={"pk": alert_receive_channel.public_primary_key})
+ response = client.get(url, format="json", **make_user_auth_headers(user, token))
+ assert response.status_code == status.HTTP_200_OK
+ assert response.json()["heartbeat"] is None
+
+
+@pytest.mark.django_db
+def test_heartbeat_data_presence_alert_receive_channel(
+ alert_receive_channel_internal_api_setup,
+ make_integration_heartbeat,
+ make_user_auth_headers,
+):
+ """
+ We get AlertReceiveChannel and there IS related heartbeat model object.
+ That is why we check for heartbeat model properties.
+ """
+ user, token, alert_receive_channel = alert_receive_channel_internal_api_setup
+ _ = make_integration_heartbeat(alert_receive_channel)
+ client = APIClient()
+
+ url = reverse("api-internal:alert_receive_channel-detail", kwargs={"pk": alert_receive_channel.public_primary_key})
+ response = client.get(url, format="json", **make_user_auth_headers(user, token))
+
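+ # "instruction" is rendered server-side, so the expected payload reuses the
+ # value from the response rather than hardcoding it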
+ expected_heartbeat_payload = {
+ "id": alert_receive_channel.integration_heartbeat.public_primary_key,
+ "last_heartbeat_time_verbal": None,
+ "alert_receive_channel": alert_receive_channel.public_primary_key,
+ "link": alert_receive_channel.integration_heartbeat.link,
+ "timeout_seconds": 60,
+ "status": False,
+ "instruction": response.json()["heartbeat"]["instruction"],
+ }
+ assert response.json()["heartbeat"] is not None
+ assert response.json()["heartbeat"] == expected_heartbeat_payload
+
+
+@pytest.mark.django_db
+def test_create_alert_receive_channel(alert_receive_channel_internal_api_setup, make_user_auth_headers):
+ user, token, _ = alert_receive_channel_internal_api_setup
+ client = APIClient()
+
+ url = reverse("api-internal:alert_receive_channel-list")
+ data = {
+ "integration": AlertReceiveChannel.INTEGRATION_GRAFANA,
+ "team": None,
+ }
+ response = client.post(url, data, format="json", **make_user_auth_headers(user, token))
+ assert response.status_code == status.HTTP_201_CREATED
+
+
+@pytest.mark.django_db
+def test_create_invalid_alert_receive_channel(alert_receive_channel_internal_api_setup, make_user_auth_headers):
+ user, token, _ = alert_receive_channel_internal_api_setup
+ client = APIClient()
+
+ url = reverse("api-internal:alert_receive_channel-list")
+ data = {"integration": AlertReceiveChannel.INTEGRATION_GRAFANA, "verbal_name": ""}
+ response = client.post(url, data, format="json", **make_user_auth_headers(user, token))
+ assert response.status_code == status.HTTP_400_BAD_REQUEST
+
+
+@pytest.mark.django_db
+def test_update_alert_receive_channel(alert_receive_channel_internal_api_setup, make_user_auth_headers):
+ user, token, alert_receive_channel = alert_receive_channel_internal_api_setup
+ client = APIClient()
+
+ url = reverse("api-internal:alert_receive_channel-detail", kwargs={"pk": alert_receive_channel.public_primary_key})
+ response = client.patch(
+ url,
+ data=json.dumps({"verbal_name": "test_set_verbal_name"}),
+ content_type="application/json",
+ **make_user_auth_headers(user, token),
+ )
+
+ alert_receive_channel.refresh_from_db()
+
+ assert response.status_code == status.HTTP_200_OK
+ assert alert_receive_channel.verbal_name == "test_set_verbal_name"
+
+
+@pytest.mark.django_db
+def test_integration_filter_by_maintenance(
+ alert_receive_channel_internal_api_setup,
+ make_user_auth_headers,
+ mock_start_disable_maintenance_task,
+ mock_alert_shooting_step_publish_slack_messages,
+):
+ user, token, alert_receive_channel = alert_receive_channel_internal_api_setup
+ client = APIClient()
+ mode = AlertReceiveChannel.MAINTENANCE
+ duration = AlertReceiveChannel.DURATION_ONE_HOUR.seconds
+ alert_receive_channel.start_maintenance(mode, duration, user)
+ url = reverse("api-internal:alert_receive_channel-list")
+ response = client.get(
+ f"{url}?maintenance_mode={AlertReceiveChannel.MAINTENANCE}",
+ content_type="application/json",
+ **make_user_auth_headers(user, token),
+ )
+
+ assert response.status_code == status.HTTP_200_OK
+ assert len(response.data) == 1
+
+
+@pytest.mark.django_db
+def test_integration_filter_by_debug(
+ alert_receive_channel_internal_api_setup,
+ make_user_auth_headers,
+ mock_start_disable_maintenance_task,
+ mock_alert_shooting_step_publish_slack_messages,
+):
+ user, token, alert_receive_channel = alert_receive_channel_internal_api_setup
+ client = APIClient()
+ mode = AlertReceiveChannel.DEBUG_MAINTENANCE
+ duration = AlertReceiveChannel.DURATION_ONE_HOUR.seconds
+ with patch("apps.slack.utils.post_message_to_channel"):
+ alert_receive_channel.start_maintenance(mode, duration, user)
+ url = reverse("api-internal:alert_receive_channel-list")
+ response = client.get(
+ f"{url}?maintenance_mode={AlertReceiveChannel.DEBUG_MAINTENANCE}",
+ content_type="application/json",
+ **make_user_auth_headers(user, token),
+ )
+
+ assert response.status_code == status.HTTP_200_OK
+ assert len(response.data) == 1
+
+
+@pytest.mark.django_db
+def test_integration_search(
+ make_organization_and_user_with_plugin_token,
+ make_alert_receive_channel,
+ make_user_auth_headers,
+):
+ organization, user, token = make_organization_and_user_with_plugin_token()
+ make_alert_receive_channel(organization, verbal_name="grafana_prod")
+ make_alert_receive_channel(organization, verbal_name="grafana_stage")
+ client = APIClient()
+
+ url = reverse("api-internal:alert_receive_channel-list")
+
+ response = client.get(
+ f"{url}?search=grafana", content_type="application/json", **make_user_auth_headers(user, token)
+ )
+
+ assert response.status_code == status.HTTP_200_OK
+ assert len(response.data) == 2
+
+ response = client.get(
+ f"{url}?search=zabbix", content_type="application/json", **make_user_auth_headers(user, token)
+ )
+
+ assert response.status_code == status.HTTP_200_OK
+ assert len(response.data) == 0
+
+ response = client.get(f"{url}?search=prod", content_type="application/json", **make_user_auth_headers(user, token))
+
+ assert response.status_code == status.HTTP_200_OK
+ assert len(response.data) == 1
+
+
+@pytest.mark.django_db
+@pytest.mark.parametrize(
+ "role,expected_status",
+ [
+ (Role.ADMIN, status.HTTP_200_OK),
+ (Role.EDITOR, status.HTTP_403_FORBIDDEN),
+ (Role.VIEWER, status.HTTP_403_FORBIDDEN),
+ ],
+)
+def test_alert_receive_channel_create_permissions(
+ make_organization_and_user_with_plugin_token,
+ make_user_auth_headers,
+ role,
+ expected_status,
+):
+ _, user, token = make_organization_and_user_with_plugin_token(role)
+ client = APIClient()
+
+ url = reverse("api-internal:alert_receive_channel-list")
+ with patch(
+ "apps.api.views.alert_receive_channel.AlertReceiveChannelView.create",
+ return_value=Response(
+ status=status.HTTP_200_OK,
+ ),
+ ):
+ response = client.post(url, format="json", **make_user_auth_headers(user, token))
+
+ assert response.status_code == expected_status
+
+
+@pytest.mark.django_db
+@pytest.mark.parametrize(
+ "role,expected_status",
+ [
+ (Role.ADMIN, status.HTTP_200_OK),
+ (Role.EDITOR, status.HTTP_403_FORBIDDEN),
+ (Role.VIEWER, status.HTTP_403_FORBIDDEN),
+ ],
+)
+def test_alert_receive_channel_update_permissions(
+ make_organization_and_user_with_plugin_token,
+ make_user_auth_headers,
+ make_alert_receive_channel,
+ role,
+ expected_status,
+):
+ organization, user, token = make_organization_and_user_with_plugin_token(role)
+ alert_receive_channel = make_alert_receive_channel(organization)
+ client = APIClient()
+
+ url = reverse("api-internal:alert_receive_channel-detail", kwargs={"pk": alert_receive_channel.public_primary_key})
+
+ with patch(
+ "apps.api.views.alert_receive_channel.AlertReceiveChannelView.update",
+ return_value=Response(
+ status=status.HTTP_200_OK,
+ ),
+ ):
+ response = client.put(url, format="json", **make_user_auth_headers(user, token))
+
+ assert response.status_code == expected_status
+
+ response = client.patch(url, format="json", **make_user_auth_headers(user, token))
+
+ assert response.status_code == expected_status
+
+
+@pytest.mark.django_db
+@pytest.mark.parametrize(
+ "role,expected_status",
+ [
+ (Role.ADMIN, status.HTTP_204_NO_CONTENT),
+ (Role.EDITOR, status.HTTP_403_FORBIDDEN),
+ (Role.VIEWER, status.HTTP_403_FORBIDDEN),
+ ],
+)
+def test_alert_receive_channel_delete_permissions(
+ make_organization_and_user_with_plugin_token,
+ make_user_auth_headers,
+ make_alert_receive_channel,
+ role,
+ expected_status,
+):
+ organization, user, token = make_organization_and_user_with_plugin_token(role)
+ alert_receive_channel = make_alert_receive_channel(organization)
+ client = APIClient()
+
+ url = reverse("api-internal:alert_receive_channel-detail", kwargs={"pk": alert_receive_channel.public_primary_key})
+ with patch(
+ "apps.api.views.alert_receive_channel.AlertReceiveChannelView.destroy",
+ return_value=Response(
+ status=status.HTTP_204_NO_CONTENT,
+ ),
+ ):
+ response = client.delete(url, format="json", **make_user_auth_headers(user, token))
+
+ assert response.status_code == expected_status
+
+
+@pytest.mark.django_db
+@pytest.mark.parametrize(
+ "role,expected_status",
+ [(Role.ADMIN, status.HTTP_200_OK), (Role.EDITOR, status.HTTP_200_OK), (Role.VIEWER, status.HTTP_200_OK)],
+)
+def test_alert_receive_channel_list_permissions(
+ make_organization_and_user_with_plugin_token,
+ make_user_auth_headers,
+ role,
+ expected_status,
+):
+ organization, user, token = make_organization_and_user_with_plugin_token(role)
+ client = APIClient()
+
+ url = reverse("api-internal:alert_receive_channel-list")
+
+ with patch(
+ "apps.api.views.alert_receive_channel.AlertReceiveChannelView.list",
+ return_value=Response(
+ status=status.HTTP_200_OK,
+ ),
+ ):
+ response = client.get(url, format="json", **make_user_auth_headers(user, token))
+
+ assert response.status_code == expected_status
+
+
+@pytest.mark.django_db
+@pytest.mark.parametrize(
+ "role,expected_status",
+ [(Role.ADMIN, status.HTTP_200_OK), (Role.EDITOR, status.HTTP_200_OK), (Role.VIEWER, status.HTTP_200_OK)],
+)
+def test_alert_receive_channel_detail_permissions(
+ make_organization_and_user_with_plugin_token,
+ make_user_auth_headers,
+ make_alert_receive_channel,
+ role,
+ expected_status,
+):
+ organization, user, token = make_organization_and_user_with_plugin_token(role)
+ alert_receive_channel = make_alert_receive_channel(organization)
+ client = APIClient()
+
+ url = reverse("api-internal:alert_receive_channel-detail", kwargs={"pk": alert_receive_channel.public_primary_key})
+
+ with patch(
+ "apps.api.views.alert_receive_channel.AlertReceiveChannelView.retrieve",
+ return_value=Response(
+ status=status.HTTP_200_OK,
+ ),
+ ):
+ response = client.get(url, format="json", **make_user_auth_headers(user, token))
+
+ assert response.status_code == expected_status
+
+
+@pytest.mark.django_db
+@pytest.mark.parametrize(
+ "role,expected_status",
+ [
+ (Role.ADMIN, status.HTTP_200_OK),
+ (Role.EDITOR, status.HTTP_200_OK),
+ (Role.VIEWER, status.HTTP_403_FORBIDDEN),
+ ],
+)
+def test_alert_receive_channel_send_demo_alert_permissions(
+ make_organization_and_user_with_plugin_token,
+ make_user_auth_headers,
+ make_alert_receive_channel,
+ role,
+ expected_status,
+):
+ organization, user, token = make_organization_and_user_with_plugin_token(role)
+ alert_receive_channel = make_alert_receive_channel(organization)
+ client = APIClient()
+
+ url = reverse(
+ "api-internal:alert_receive_channel-send-demo-alert", kwargs={"pk": alert_receive_channel.public_primary_key}
+ )
+
+ with patch(
+ "apps.api.views.alert_receive_channel.AlertReceiveChannelView.send_demo_alert",
+ return_value=Response(
+ status=status.HTTP_200_OK,
+ ),
+ ):
+ response = client.post(url, format="json", **make_user_auth_headers(user, token))
+
+ assert response.status_code == expected_status
+
+
+@pytest.mark.django_db
+@pytest.mark.parametrize(
+ "role,expected_status",
+ [
+ (Role.ADMIN, status.HTTP_200_OK),
+ (Role.EDITOR, status.HTTP_200_OK),
+ (Role.VIEWER, status.HTTP_200_OK),
+ ],
+)
+def test_alert_receive_channel_integration_options_permissions(
+ make_organization_and_user_with_plugin_token,
+ make_user_auth_headers,
+ role,
+ expected_status,
+):
+ _, user, token = make_organization_and_user_with_plugin_token(role)
+ client = APIClient()
+
+ url = reverse("api-internal:alert_receive_channel-integration-options")
+
+ with patch(
+ "apps.api.views.alert_receive_channel.AlertReceiveChannelView.integration_options",
+ return_value=Response(
+ status=status.HTTP_200_OK,
+ ),
+ ):
+ response = client.get(url, format="json", **make_user_auth_headers(user, token))
+
+ assert response.status_code == expected_status
+
+
+@pytest.mark.django_db
+@pytest.mark.parametrize(
+ "role,expected_status",
+ [
+ (Role.ADMIN, status.HTTP_200_OK),
+ (Role.EDITOR, status.HTTP_200_OK),
+ (Role.VIEWER, status.HTTP_403_FORBIDDEN),
+ ],
+)
+def test_alert_receive_channel_preview_template_permissions(
+ make_organization_and_user_with_plugin_token,
+ make_user_auth_headers,
+ make_alert_receive_channel,
+ role,
+ expected_status,
+):
+ organization, user, token = make_organization_and_user_with_plugin_token(role)
+ alert_receive_channel = make_alert_receive_channel(organization)
+ client = APIClient()
+ url = reverse(
+ "api-internal:alert_receive_channel-preview-template", kwargs={"pk": alert_receive_channel.public_primary_key}
+ )
+
+ with patch(
+ "apps.api.views.alert_receive_channel.AlertReceiveChannelView.preview_template",
+ return_value=Response(
+ status=status.HTTP_200_OK,
+ ),
+ ):
+ response = client.post(url, format="json", **make_user_auth_headers(user, token))
+
+ assert response.status_code == expected_status
+
+
+@pytest.mark.django_db
+@pytest.mark.parametrize("template_name", ["title", "message", "image_url"])
+@pytest.mark.parametrize("notification_channel", ["slack", "web", "telegram"])
+def test_alert_receive_channel_preview_template_require_notification_channel(
+ make_organization_and_user_with_plugin_token,
+ make_user_auth_headers,
+ make_alert_receive_channel,
+ template_name,
+ notification_channel,
+ make_alert_group,
+ make_alert,
+):
+ organization, user, token = make_organization_and_user_with_plugin_token()
+ alert_receive_channel = make_alert_receive_channel(organization)
+ alert_group = make_alert_group(alert_receive_channel)
+
+ make_alert(alert_group=alert_group, raw_request_data=alert_receive_channel.config.example_payload)
+
+ client = APIClient()
+ url = reverse(
+ "api-internal:alert_receive_channel-preview-template", kwargs={"pk": alert_receive_channel.public_primary_key}
+ )
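+ # a bare template name such as "title" is rejected; it must be prefixed with
+ # a notification channel, e.g. "slack_title" (both cases asserted below)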
+ data = {
+ "template_body": "Template",
+ "template_name": template_name,
+ }
+
+ response = client.post(url, data=data, format="json", **make_user_auth_headers(user, token))
+
+ assert response.status_code == status.HTTP_400_BAD_REQUEST
+
+ data = {
+ "template_body": "Template",
+ "template_name": f"{notification_channel}_{template_name}",
+ }
+
+ response = client.post(url, data=data, format="json", **make_user_auth_headers(user, token))
+
+ assert response.status_code == status.HTTP_200_OK
+
+
+@pytest.mark.django_db
+@pytest.mark.parametrize(
+ "role,expected_status",
+ [
+ (Role.ADMIN, status.HTTP_200_OK),
+ (Role.EDITOR, status.HTTP_403_FORBIDDEN),
+ (Role.VIEWER, status.HTTP_403_FORBIDDEN),
+ ],
+)
+def test_alert_receive_channel_change_team_permissions(
+ make_organization_and_user_with_plugin_token,
+ make_user_auth_headers,
+ make_alert_receive_channel,
+ role,
+ expected_status,
+):
+ organization, user, token = make_organization_and_user_with_plugin_token(role)
+ alert_receive_channel = make_alert_receive_channel(organization)
+ client = APIClient()
+ url = reverse(
+ "api-internal:alert_receive_channel-change-team", kwargs={"pk": alert_receive_channel.public_primary_key}
+ )
+
+ with patch(
+ "apps.api.views.alert_receive_channel.AlertReceiveChannelView.change_team",
+ return_value=Response(
+ status=status.HTTP_200_OK,
+ ),
+ ):
+ response = client.put(url, format="json", **make_user_auth_headers(user, token))
+
+ assert response.status_code == expected_status
+
+
+@pytest.mark.django_db
+def test_alert_receive_channel_change_team(
+ make_organization_and_user_with_plugin_token,
+ make_team,
+ make_user_for_organization,
+ make_user_auth_headers,
+ make_alert_receive_channel,
+ make_channel_filter,
+ make_integration_escalation_chain_route_escalation_policy,
+):
+ organization, user, token = make_organization_and_user_with_plugin_token()
+ team = make_team(organization)
+ integration, escalation_chain, _, escalation_policy = make_integration_escalation_chain_route_escalation_policy(
+ organization, EscalationPolicy.STEP_NOTIFY_MULTIPLE_USERS
+ )
+ client = APIClient()
+ url = reverse("api-internal:alert_receive_channel-change-team", kwargs={"pk": integration.public_primary_key})
+
+ assert integration.team != team
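+ # the team change is rejected while the requester or any user referenced by
+ # the escalation policy is not a member of the target team, or while the
+ # escalation chain is shared with another integration; each case follows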
+
+ # return 400 on change team for integration if user is not a member of chosen team
+ response = client.put(
+ f"{url}?team_id={team.public_primary_key}", format="json", **make_user_auth_headers(user, token)
+ )
+ assert response.status_code == status.HTTP_400_BAD_REQUEST
+ integration.refresh_from_db()
+ assert integration.team != team
+
+ team.users.add(user)
+ # return 400 on change team for integration if escalation_chain is connected to another integration
+ another_integration = make_alert_receive_channel(organization)
+ another_channel_filter = make_channel_filter(another_integration, escalation_chain=escalation_chain)
+ response = client.put(
+ f"{url}?team_id={team.public_primary_key}", format="json", **make_user_auth_headers(user, token)
+ )
+ assert response.status_code == status.HTTP_400_BAD_REQUEST
+ integration.refresh_from_db()
+ assert integration.team != team
+
+ another_channel_filter.escalation_chain = None
+ another_channel_filter.save()
+
+ # return 400 on change team for integration if user from escalation policy is not a member of team
+ another_user = make_user_for_organization(organization)
+ escalation_policy.notify_to_users_queue.add(another_user)
+ response = client.put(
+ f"{url}?team_id={team.public_primary_key}", format="json", **make_user_auth_headers(user, token)
+ )
+ assert response.status_code == status.HTTP_400_BAD_REQUEST
+ integration.refresh_from_db()
+ assert integration.team != team
+
+ team.users.add(another_user)
+ # otherwise change team
+ response = client.put(
+ f"{url}?team_id={team.public_primary_key}", format="json", **make_user_auth_headers(user, token)
+ )
+ assert response.status_code == status.HTTP_200_OK
+ integration.refresh_from_db()
+ assert integration.team == team
+
+
+@pytest.mark.django_db
+@pytest.mark.parametrize(
+ "role,expected_status",
+ [
+ (Role.ADMIN, status.HTTP_200_OK),
+ (Role.EDITOR, status.HTTP_200_OK),
+ (Role.VIEWER, status.HTTP_200_OK),
+ ],
+)
+def test_alert_receive_channel_counters_permissions(
+ make_organization_and_user_with_plugin_token,
+ make_user_auth_headers,
+ role,
+ expected_status,
+):
+ organization, user, token = make_organization_and_user_with_plugin_token(role)
+ client = APIClient()
+
+ url = reverse(
+ "api-internal:alert_receive_channel-counters",
+ )
+
+ with patch(
+ "apps.api.views.alert_receive_channel.AlertReceiveChannelView.counters",
+ return_value=Response(
+ status=status.HTTP_200_OK,
+ ),
+ ):
+ response = client.get(url, format="json", **make_user_auth_headers(user, token))
+
+ assert response.status_code == expected_status
+
+
+@pytest.mark.django_db
+@pytest.mark.parametrize(
+ "role,expected_status",
+ [
+ (Role.ADMIN, status.HTTP_200_OK),
+ (Role.EDITOR, status.HTTP_200_OK),
+ (Role.VIEWER, status.HTTP_200_OK),
+ ],
+)
+def test_alert_receive_channel_counters_per_integration_permissions(
+ make_organization_and_user_with_plugin_token,
+ make_user_auth_headers,
+ make_alert_receive_channel,
+ role,
+ expected_status,
+):
+ organization, user, token = make_organization_and_user_with_plugin_token(role)
+ client = APIClient()
+ alert_receive_channel = make_alert_receive_channel(organization)
+
+ url = reverse(
+ "api-internal:alert_receive_channel-counters-per-integration",
+ kwargs={"pk": alert_receive_channel.public_primary_key},
+ )
+
+ with patch(
+ "apps.api.views.alert_receive_channel.AlertReceiveChannelView.counters_per_integration",
+ return_value=Response(
+ status=status.HTTP_200_OK,
+ ),
+ ):
+ response = client.get(url, format="json", **make_user_auth_headers(user, token))
+
+ assert response.status_code == expected_status
diff --git a/engine/apps/api/tests/test_alert_receive_channel_template.py b/engine/apps/api/tests/test_alert_receive_channel_template.py
new file mode 100644
index 0000000000..08340646e2
--- /dev/null
+++ b/engine/apps/api/tests/test_alert_receive_channel_template.py
@@ -0,0 +1,269 @@
+from unittest.mock import patch
+
+import pytest
+from django.urls import reverse
+from rest_framework import status
+from rest_framework.response import Response
+from rest_framework.test import APIClient
+
+from common.constants.role import Role
+
+
+@pytest.mark.django_db
+@pytest.mark.parametrize(
+ "role,expected_status",
+ [
+ (Role.ADMIN, status.HTTP_200_OK),
+ (Role.EDITOR, status.HTTP_403_FORBIDDEN),
+ (Role.VIEWER, status.HTTP_403_FORBIDDEN),
+ ],
+)
+def test_alert_receive_channel_template_update_permissions(
+ make_organization_and_user_with_plugin_token,
+ make_user_auth_headers,
+ make_alert_receive_channel,
+ role,
+ expected_status,
+):
+ organization, user, token = make_organization_and_user_with_plugin_token(role=role)
+ alert_receive_channel = make_alert_receive_channel(organization)
+ client = APIClient()
+
+ url = reverse("api-internal:alert_receive_channel_template-detail", kwargs={"pk": alert_receive_channel.pk})
+ with patch(
+ "apps.api.views.alert_receive_channel_template.AlertReceiveChannelTemplateView.update",
+ return_value=Response(
+ status=status.HTTP_200_OK,
+ ),
+ ):
+ response = client.put(url, format="json", **make_user_auth_headers(user, token))
+ assert response.status_code == expected_status
+
+ response = client.patch(url, format="json", **make_user_auth_headers(user, token))
+ assert response.status_code == expected_status
+
+
+@pytest.mark.django_db
+@pytest.mark.parametrize(
+ "role,expected_status",
+ [
+ (Role.ADMIN, status.HTTP_200_OK),
+ (Role.EDITOR, status.HTTP_200_OK),
+ (Role.VIEWER, status.HTTP_200_OK),
+ ],
+)
+def test_alert_receive_channel_template_detail_permissions(
+ make_organization_and_user_with_plugin_token,
+ make_user_auth_headers,
+ make_alert_receive_channel,
+ role,
+ expected_status,
+):
+ organization, user, token = make_organization_and_user_with_plugin_token(role=role)
+ alert_receive_channel = make_alert_receive_channel(organization)
+ client = APIClient()
+
+ url = reverse("api-internal:alert_receive_channel_template-detail", kwargs={"pk": alert_receive_channel.pk})
+
+ with patch(
+ "apps.api.views.alert_receive_channel_template.AlertReceiveChannelTemplateView.retrieve",
+ return_value=Response(
+ status=status.HTTP_200_OK,
+ ),
+ ):
+ response = client.get(url, format="json", **make_user_auth_headers(user, token))
+
+ assert response.status_code == expected_status
+
+
+@pytest.mark.django_db
+def test_alert_receive_channel_template_include_additional_backend_templates(
+ make_organization_and_user_with_plugin_token,
+ make_user_auth_headers,
+ make_alert_receive_channel,
+):
+ organization, user, token = make_organization_and_user_with_plugin_token(role=Role.ADMIN)
+ alert_receive_channel = make_alert_receive_channel(
+ organization,
+ messaging_backends_templates={"TESTONLY": {"title": "the-title", "message": "the-message", "image_url": "url"}},
+ )
+ client = APIClient()
+
+ url = reverse(
+ "api-internal:alert_receive_channel_template-detail", kwargs={"pk": alert_receive_channel.public_primary_key}
+ )
+
+ response = client.get(url, format="json", **make_user_auth_headers(user, token))
+
+ assert response.status_code == status.HTTP_200_OK
+ templates_data = response.json()
+ assert templates_data["testonly_title_template"] == "the-title"
+ assert templates_data["testonly_message_template"] == "the-message"
+ assert templates_data["testonly_image_url_template"] == "url"
+
+
+@pytest.mark.django_db
+def test_alert_receive_channel_template_include_additional_backend_templates_using_defaults(
+ make_organization_and_user_with_plugin_token,
+ make_user_auth_headers,
+ make_alert_receive_channel,
+):
+ organization, user, token = make_organization_and_user_with_plugin_token(role=Role.ADMIN)
+ alert_receive_channel = make_alert_receive_channel(organization, messaging_backends_templates=None)
+ client = APIClient()
+
+ url = reverse(
+ "api-internal:alert_receive_channel_template-detail", kwargs={"pk": alert_receive_channel.public_primary_key}
+ )
+
+ response = client.get(url, format="json", **make_user_auth_headers(user, token))
+
+ assert response.status_code == status.HTTP_200_OK
+ templates_data = response.json()
+ assert templates_data["testonly_title_template"] == alert_receive_channel.get_default_template_attribute(
+ "TESTONLY", "title"
+ )
+ assert templates_data["testonly_message_template"] == alert_receive_channel.get_default_template_attribute(
+ "TESTONLY", "message"
+ )
+ assert templates_data["testonly_image_url_template"] == alert_receive_channel.get_default_template_attribute(
+ "TESTONLY", "image_url"
+ )
+
+
+@pytest.mark.django_db
+def test_update_alert_receive_channel_backend_template_invalid_template(
+ make_organization_and_user_with_plugin_token,
+ make_user_auth_headers,
+ make_alert_receive_channel,
+):
+ organization, user, token = make_organization_and_user_with_plugin_token(role=Role.ADMIN)
+ alert_receive_channel = make_alert_receive_channel(organization, messaging_backends_templates=None)
+ client = APIClient()
+
+ url = reverse(
+ "api-internal:alert_receive_channel_template-detail", kwargs={"pk": alert_receive_channel.public_primary_key}
+ )
+
+ response = client.put(
+ url, format="json", data={"testonly_title_template": "{{ wrong"}, **make_user_auth_headers(user, token)
+ )
+
+ assert response.status_code == status.HTTP_400_BAD_REQUEST
+ assert response.json() == {"testonly_title_template": "invalid template"}
+
+
+@pytest.mark.django_db
+def test_update_alert_receive_channel_backend_template_invalid_url(
+ make_organization_and_user_with_plugin_token,
+ make_user_auth_headers,
+ make_alert_receive_channel,
+):
+ organization, user, token = make_organization_and_user_with_plugin_token(role=Role.ADMIN)
+ alert_receive_channel = make_alert_receive_channel(organization, messaging_backends_templates=None)
+ client = APIClient()
+
+ url = reverse(
+ "api-internal:alert_receive_channel_template-detail", kwargs={"pk": alert_receive_channel.public_primary_key}
+ )
+
+ response = client.put(
+ url, format="json", data={"testonly_image_url_template": "not-url"}, **make_user_auth_headers(user, token)
+ )
+
+ assert response.status_code == status.HTTP_400_BAD_REQUEST
+ assert response.json() == {"testonly_image_url_template": "invalid URL"}
+
+
+@pytest.mark.django_db
+def test_update_alert_receive_channel_backend_template_empty_values_allowed(
+ make_organization_and_user_with_plugin_token,
+ make_user_auth_headers,
+ make_alert_receive_channel,
+):
+ organization, user, token = make_organization_and_user_with_plugin_token(role=Role.ADMIN)
+ alert_receive_channel = make_alert_receive_channel(organization, messaging_backends_templates=None)
+ client = APIClient()
+
+ url = reverse(
+ "api-internal:alert_receive_channel_template-detail", kwargs={"pk": alert_receive_channel.public_primary_key}
+ )
+
+ response = client.put(
+ url,
+ format="json",
+ data={"testonly_title_template": "", "testonly_image_url_template": ""},
+ **make_user_auth_headers(user, token),
+ )
+
+ assert response.status_code == status.HTTP_200_OK
+ alert_receive_channel.refresh_from_db()
+ assert alert_receive_channel.messaging_backends_templates["TESTONLY"] == {"title": "", "image_url": ""}
+
+
+@pytest.mark.django_db
+def test_update_alert_receive_channel_backend_template_update_values(
+ make_organization_and_user_with_plugin_token,
+ make_user_auth_headers,
+ make_alert_receive_channel,
+):
+ organization, user, token = make_organization_and_user_with_plugin_token(role=Role.ADMIN)
+ alert_receive_channel = make_alert_receive_channel(
+ organization,
+ messaging_backends_templates={
+ "TESTONLY": {"title": "the-title", "message": "some-message"},
+ "OTHER": {"title": "some-title"},
+ },
+ )
+ client = APIClient()
+
+ url = reverse(
+ "api-internal:alert_receive_channel_template-detail", kwargs={"pk": alert_receive_channel.public_primary_key}
+ )
+
+ # patch messaging backends to add OTHER as a valid backend
+ with patch(
+ "apps.api.serializers.alert_receive_channel.get_messaging_backends",
+ return_value=[("TESTONLY", None), ("OTHER", None)],
+ ):
+ response = client.put(
+ url, format="json", data={"testonly_title_template": "updated-title"}, **make_user_auth_headers(user, token)
+ )
+
+ assert response.status_code == status.HTTP_200_OK
+ alert_receive_channel.refresh_from_db()
+ assert alert_receive_channel.messaging_backends_templates["TESTONLY"] == {
+ "title": "updated-title",
+ "message": "some-message",
+ }
+ assert alert_receive_channel.messaging_backends_templates["OTHER"] == {"title": "some-title"}
+
+
+@pytest.mark.django_db
+def test_preview_alert_receive_channel_backend_template(
+ make_organization_and_user_with_plugin_token,
+ make_user_auth_headers,
+ make_alert_receive_channel,
+ make_channel_filter,
+ make_alert_group,
+ make_alert,
+):
+ organization, user, token = make_organization_and_user_with_plugin_token(role=Role.ADMIN)
+ alert_receive_channel = make_alert_receive_channel(organization)
+ default_channel_filter = make_channel_filter(alert_receive_channel, is_default=True)
+ alert_group = make_alert_group(alert_receive_channel, channel_filter=default_channel_filter)
+ make_alert(alert_group=alert_group, raw_request_data={"title": "alert!"})
+ client = APIClient()
+
+ url = reverse(
+ "api-internal:alert_receive_channel-preview-template", kwargs={"pk": alert_receive_channel.public_primary_key}
+ )
+
+ data = {
+ "template_body": "title: {{ payload.title }}",
+ "template_name": "testonly_title_template",
+ }
+ response = client.post(url, format="json", data=data, **make_user_auth_headers(user, token))
+
+ assert response.status_code == status.HTTP_200_OK
+ assert response.json() == {"preview": "title: alert!"}
diff --git a/engine/apps/api/tests/test_channel_filter.py b/engine/apps/api/tests/test_channel_filter.py
new file mode 100644
index 0000000000..8a608eb82d
--- /dev/null
+++ b/engine/apps/api/tests/test_channel_filter.py
@@ -0,0 +1,488 @@
+from unittest.mock import patch
+
+import pytest
+from django.urls import reverse
+from rest_framework import status
+from rest_framework.response import Response
+from rest_framework.test import APIClient
+
+from common.constants.role import Role
+
+
+@pytest.mark.django_db
+@pytest.mark.parametrize(
+ "role,expected_status",
+ [
+ (Role.ADMIN, status.HTTP_200_OK),
+ (Role.EDITOR, status.HTTP_403_FORBIDDEN),
+ (Role.VIEWER, status.HTTP_403_FORBIDDEN),
+ ],
+)
+def test_channel_filter_create_permissions(
+ make_organization_and_user_with_plugin_token,
+ make_alert_receive_channel,
+ make_user_auth_headers,
+ role,
+ expected_status,
+):
+ _, user, token = make_organization_and_user_with_plugin_token(role)
+ client = APIClient()
+
+ url = reverse("api-internal:channel_filter-list")
+
+ with patch(
+ "apps.api.views.channel_filter.ChannelFilterView.create",
+ return_value=Response(
+ status=status.HTTP_200_OK,
+ ),
+ ):
+ response = client.post(url, format="json", **make_user_auth_headers(user, token))
+
+ assert response.status_code == expected_status
+
+
+@pytest.mark.django_db
+@pytest.mark.parametrize(
+ "role,expected_status",
+ [
+ (Role.ADMIN, status.HTTP_200_OK),
+ (Role.EDITOR, status.HTTP_403_FORBIDDEN),
+ (Role.VIEWER, status.HTTP_403_FORBIDDEN),
+ ],
+)
+def test_channel_filter_update_permissions(
+ make_organization_and_user_with_plugin_token,
+ make_alert_receive_channel,
+ make_channel_filter,
+ make_user_auth_headers,
+ role,
+ expected_status,
+):
+ organization, user, token = make_organization_and_user_with_plugin_token(role)
+ alert_receive_channel = make_alert_receive_channel(organization)
+ channel_filter = make_channel_filter(alert_receive_channel, is_default=True)
+ client = APIClient()
+
+ url = reverse("api-internal:channel_filter-detail", kwargs={"pk": channel_filter.public_primary_key})
+
+ with patch(
+ "apps.api.views.channel_filter.ChannelFilterView.update",
+ return_value=Response(
+ status=status.HTTP_200_OK,
+ ),
+ ):
+ response = client.put(url, format="json", **make_user_auth_headers(user, token))
+
+ assert response.status_code == expected_status
+
+ response = client.patch(url, format="json", **make_user_auth_headers(user, token))
+
+ assert response.status_code == expected_status
+
+
+@pytest.mark.django_db
+@pytest.mark.parametrize(
+ "role,expected_status",
+ [(Role.ADMIN, status.HTTP_200_OK), (Role.EDITOR, status.HTTP_200_OK), (Role.VIEWER, status.HTTP_200_OK)],
+)
+def test_channel_filter_list_permissions(
+ make_organization_and_user_with_plugin_token,
+ make_alert_receive_channel,
+ make_channel_filter,
+ make_user_auth_headers,
+ role,
+ expected_status,
+):
+ organization, user, token = make_organization_and_user_with_plugin_token(role)
+ alert_receive_channel = make_alert_receive_channel(organization)
+ make_channel_filter(alert_receive_channel, is_default=True)
+ client = APIClient()
+
+ url = reverse("api-internal:channel_filter-list")
+
+ with patch(
+ "apps.api.views.channel_filter.ChannelFilterView.list",
+ return_value=Response(
+ status=status.HTTP_200_OK,
+ ),
+ ):
+ response = client.get(url, format="json", **make_user_auth_headers(user, token))
+
+ assert response.status_code == expected_status
+
+
+@pytest.mark.django_db
+@pytest.mark.parametrize(
+ "role,expected_status",
+ [(Role.ADMIN, status.HTTP_200_OK), (Role.EDITOR, status.HTTP_200_OK), (Role.VIEWER, status.HTTP_200_OK)],
+)
+def test_channel_filter_retrieve_permissions(
+ make_organization_and_user_with_plugin_token,
+ make_alert_receive_channel,
+ make_channel_filter,
+ make_user_auth_headers,
+ role,
+ expected_status,
+):
+ organization, user, token = make_organization_and_user_with_plugin_token(role)
+ alert_receive_channel = make_alert_receive_channel(organization)
+ channel_filter = make_channel_filter(alert_receive_channel, is_default=True)
+ client = APIClient()
+
+ url = reverse("api-internal:channel_filter-detail", kwargs={"pk": channel_filter.public_primary_key})
+
+ with patch(
+ "apps.api.views.channel_filter.ChannelFilterView.retrieve",
+ return_value=Response(
+ status=status.HTTP_200_OK,
+ ),
+ ):
+ response = client.get(url, format="json", **make_user_auth_headers(user, token))
+
+ assert response.status_code == expected_status
+
+
+@pytest.mark.django_db
+@pytest.mark.parametrize(
+ "role,expected_status",
+ [
+ (Role.ADMIN, status.HTTP_204_NO_CONTENT),
+ (Role.EDITOR, status.HTTP_403_FORBIDDEN),
+ (Role.VIEWER, status.HTTP_403_FORBIDDEN),
+ ],
+)
+def test_channel_filter_delete_permissions(
+ make_organization_and_user_with_plugin_token,
+ make_alert_receive_channel,
+ make_channel_filter,
+ make_user_auth_headers,
+ role,
+ expected_status,
+):
+ organization, user, token = make_organization_and_user_with_plugin_token(role)
+ alert_receive_channel = make_alert_receive_channel(organization)
+ channel_filter = make_channel_filter(alert_receive_channel, is_default=True)
+ client = APIClient()
+
+ url = reverse("api-internal:channel_filter-detail", kwargs={"pk": channel_filter.public_primary_key})
+
+ with patch(
+ "apps.api.views.channel_filter.ChannelFilterView.destroy",
+ return_value=Response(
+ status=status.HTTP_204_NO_CONTENT,
+ ),
+ ):
+ response = client.delete(url, format="json", **make_user_auth_headers(user, token))
+
+ assert response.status_code == expected_status
+
+
+@pytest.mark.django_db
+@pytest.mark.parametrize(
+ "role,expected_status",
+ [
+ (Role.ADMIN, status.HTTP_200_OK),
+ (Role.EDITOR, status.HTTP_403_FORBIDDEN),
+ (Role.VIEWER, status.HTTP_403_FORBIDDEN),
+ ],
+)
+def test_channel_filter_move_to_position_permissions(
+ make_organization_and_user_with_plugin_token,
+ make_alert_receive_channel,
+ make_channel_filter,
+ make_user_auth_headers,
+ role,
+ expected_status,
+):
+ organization, user, token = make_organization_and_user_with_plugin_token(role)
+ alert_receive_channel = make_alert_receive_channel(organization)
+ channel_filter = make_channel_filter(alert_receive_channel, is_default=True)
+ client = APIClient()
+
+ url = reverse("api-internal:channel_filter-move-to-position", kwargs={"pk": channel_filter.public_primary_key})
+
+ with patch(
+ "apps.api.views.channel_filter.ChannelFilterView.move_to_position",
+ return_value=Response(
+ status=status.HTTP_200_OK,
+ ),
+ ):
+ response = client.put(url, format="json", **make_user_auth_headers(user, token))
+
+ assert response.status_code == expected_status
+
+
+@pytest.mark.django_db
+@pytest.mark.parametrize(
+ "role,expected_status",
+ [
+ (Role.ADMIN, status.HTTP_200_OK),
+ (Role.EDITOR, status.HTTP_200_OK),
+ (Role.VIEWER, status.HTTP_403_FORBIDDEN),
+ ],
+)
+def test_channel_filter_send_demo_alert_permissions(
+ make_organization_and_user_with_plugin_token,
+ make_user_auth_headers,
+ make_alert_receive_channel,
+ make_channel_filter,
+ role,
+ expected_status,
+):
+ organization, user, token = make_organization_and_user_with_plugin_token(role)
+ alert_receive_channel = make_alert_receive_channel(organization)
+ channel_filter = make_channel_filter(alert_receive_channel, is_default=True)
+ client = APIClient()
+
+ url = reverse("api-internal:channel_filter-send-demo-alert", kwargs={"pk": channel_filter.public_primary_key})
+
+ with patch(
+ "apps.api.views.channel_filter.ChannelFilterView.send_demo_alert",
+ return_value=Response(
+ status=status.HTTP_200_OK,
+ ),
+ ):
+ response = client.post(url, format="json", **make_user_auth_headers(user, token))
+
+ assert response.status_code == expected_status
+
+
+@pytest.mark.django_db
+def test_channel_filter_create_with_order(
+ make_organization_and_user_with_plugin_token,
+ make_alert_receive_channel,
+ make_escalation_chain,
+ make_channel_filter,
+ make_user_auth_headers,
+):
+ organization, user, token = make_organization_and_user_with_plugin_token()
+ alert_receive_channel = make_alert_receive_channel(organization)
+ make_escalation_chain(organization)
+ # create default channel filter
+ make_channel_filter(alert_receive_channel, is_default=True)
+ channel_filter = make_channel_filter(alert_receive_channel, filtering_term="a", is_default=False)
+ client = APIClient()
+
+ url = reverse("api-internal:channel_filter-list")
+ data_for_creation = {
+ "alert_receive_channel": alert_receive_channel.public_primary_key,
+ "filtering_term": "b",
+ "order": 0,
+ }
+
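+ # creating with an explicit order inserts at that position and shifts the
+ # existing non-default filter down to order 1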
+ response = client.post(url, data=data_for_creation, format="json", **make_user_auth_headers(user, token))
+ channel_filter.refresh_from_db()
+
+ assert response.status_code == status.HTTP_201_CREATED
+ assert response.json()["order"] == 0
+ assert channel_filter.order == 1
+
+
+@pytest.mark.django_db
+def test_channel_filter_create_without_order(
+ make_organization_and_user_with_plugin_token,
+ make_alert_receive_channel,
+ make_escalation_chain,
+ make_channel_filter,
+ make_user_auth_headers,
+):
+ organization, user, token = make_organization_and_user_with_plugin_token()
+ alert_receive_channel = make_alert_receive_channel(organization)
+ make_escalation_chain(organization)
+ make_channel_filter(alert_receive_channel, is_default=True)
+ channel_filter = make_channel_filter(alert_receive_channel, filtering_term="a", is_default=False)
+ client = APIClient()
+
+ url = reverse("api-internal:channel_filter-list")
+ data_for_creation = {
+ "alert_receive_channel": alert_receive_channel.public_primary_key,
+ "filtering_term": "b",
+ }
+
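+ # with no explicit order, the new filter is appended after the existing one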
+ response = client.post(url, data=data_for_creation, format="json", **make_user_auth_headers(user, token))
+ channel_filter.refresh_from_db()
+
+ assert response.status_code == status.HTTP_201_CREATED
+ assert response.json()["order"] == 1
+ assert channel_filter.order == 0
+
+
+@pytest.mark.django_db
+def test_channel_filter_update_with_order(
+ make_organization_and_user_with_plugin_token,
+ make_alert_receive_channel,
+ make_channel_filter,
+ make_user_auth_headers,
+):
+ organization, user, token = make_organization_and_user_with_plugin_token()
+ alert_receive_channel = make_alert_receive_channel(organization)
+ # create default channel filter
+ make_channel_filter(alert_receive_channel, is_default=True)
+ first_channel_filter = make_channel_filter(alert_receive_channel, filtering_term="a", is_default=False)
+ second_channel_filter = make_channel_filter(alert_receive_channel, filtering_term="b", is_default=False)
+
+ client = APIClient()
+
+ url = reverse("api-internal:channel_filter-detail", kwargs={"pk": first_channel_filter.public_primary_key})
+ data_for_update = {
+ "id": first_channel_filter.public_primary_key,
+ "alert_receive_channel": alert_receive_channel.public_primary_key,
+ "order": 1,
+ "filtering_term": first_channel_filter.filtering_term,
+ }
+
+ response = client.put(url, data=data_for_update, format="json", **make_user_auth_headers(user, token))
+
+ first_channel_filter.refresh_from_db()
+ second_channel_filter.refresh_from_db()
+
+ assert response.status_code == status.HTTP_200_OK
+ assert response.json()["order"] == 1
+ assert first_channel_filter.order == 1
+ assert second_channel_filter.order == 0
+
+
+@pytest.mark.django_db
+def test_channel_filter_update_without_order(
+ make_organization_and_user_with_plugin_token,
+ make_alert_receive_channel,
+ make_channel_filter,
+ make_user_auth_headers,
+):
+ organization, user, token = make_organization_and_user_with_plugin_token()
+ alert_receive_channel = make_alert_receive_channel(organization)
+ # create default channel filter
+ make_channel_filter(alert_receive_channel, is_default=True)
+ first_channel_filter = make_channel_filter(alert_receive_channel, filtering_term="a", is_default=False)
+ second_channel_filter = make_channel_filter(alert_receive_channel, filtering_term="b", is_default=False)
+
+ client = APIClient()
+
+ url = reverse("api-internal:channel_filter-detail", kwargs={"pk": first_channel_filter.public_primary_key})
+ data_for_update = {
+ "id": first_channel_filter.public_primary_key,
+ "alert_receive_channel": alert_receive_channel.public_primary_key,
+ "filtering_term": first_channel_filter.filtering_term + "_updated",
+ }
+
+ response = client.put(url, data=data_for_update, format="json", **make_user_auth_headers(user, token))
+
+ first_channel_filter.refresh_from_db()
+ second_channel_filter.refresh_from_db()
+
+ assert response.status_code == status.HTTP_200_OK
+ assert response.json()["order"] == 0
+ assert first_channel_filter.order == 0
+ assert second_channel_filter.order == 1
+
+
+@pytest.mark.django_db
+def test_channel_filter_notification_backends(
+ make_organization_and_user_with_plugin_token,
+ make_alert_receive_channel,
+ make_channel_filter,
+ make_user_auth_headers,
+):
+ organization, user, token = make_organization_and_user_with_plugin_token()
+ alert_receive_channel = make_alert_receive_channel(organization)
+ extra_notification_backends = {"TESTONLY": {"channel_id": "abc123"}}
+ channel_filter = make_channel_filter(
+ alert_receive_channel,
+ notification_backends=extra_notification_backends,
+ )
+
+ client = APIClient()
+
+ url = reverse("api-internal:channel_filter-detail", kwargs={"pk": channel_filter.public_primary_key})
+ response = client.get(url, format="json", **make_user_auth_headers(user, token))
+
+ assert response.status_code == status.HTTP_200_OK
+ assert response.json()["notification_backends"] == extra_notification_backends
+
+
+@pytest.mark.django_db
+def test_channel_filter_update_notification_backends(
+ make_organization_and_user_with_plugin_token,
+ make_alert_receive_channel,
+ make_channel_filter,
+ make_user_auth_headers,
+):
+ organization, user, token = make_organization_and_user_with_plugin_token()
+ alert_receive_channel = make_alert_receive_channel(organization)
+ extra_notification_backends = {"TESTONLY": {"channel_id": "abc123"}}
+ channel_filter = make_channel_filter(alert_receive_channel)
+
+ client = APIClient()
+
+ url = reverse("api-internal:channel_filter-detail", kwargs={"pk": channel_filter.public_primary_key})
+ data_for_update = {
+ "notification_backends": extra_notification_backends,
+ }
+
+ response = client.put(url, data=data_for_update, format="json", **make_user_auth_headers(user, token))
+
+ channel_filter.refresh_from_db()
+
+ assert response.status_code == status.HTTP_200_OK
+ assert response.json()["notification_backends"] == extra_notification_backends
+ assert channel_filter.notification_backends == extra_notification_backends
+
+
+@pytest.mark.django_db
+def test_channel_filter_update_notification_backends_updates_existing_data(
+ make_organization_and_user_with_plugin_token,
+ make_alert_receive_channel,
+ make_channel_filter,
+ make_user_auth_headers,
+):
+ organization, user, token = make_organization_and_user_with_plugin_token()
+ alert_receive_channel = make_alert_receive_channel(organization)
+ existing_notification_backends = {"TESTONLY": {"enabled": True, "channel": "ABCDEF"}}
+ channel_filter = make_channel_filter(alert_receive_channel, notification_backends=existing_notification_backends)
+
+ client = APIClient()
+
+ url = reverse("api-internal:channel_filter-detail", kwargs={"pk": channel_filter.public_primary_key})
+ notification_backends_update = {"TESTONLY": {"channel": "abc123"}}
+ data_for_update = {
+ "notification_backends": notification_backends_update,
+ }
+
+ response = client.put(url, data=data_for_update, format="json", **make_user_auth_headers(user, token))
+
+ channel_filter.refresh_from_db()
+
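+ # the endpoint merges per-backend settings into the stored data instead of
+ # replacing the whole blob; mirror that merge here (dict "|" needs Python 3.9+)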
+ expected_notification_backends = existing_notification_backends
+ for backend, updated_data in notification_backends_update.items():
+ expected_notification_backends[backend] = expected_notification_backends.get(backend, {}) | updated_data
+ assert response.status_code == status.HTTP_200_OK
+ assert response.json()["notification_backends"] == expected_notification_backends
+ assert channel_filter.notification_backends == expected_notification_backends
+
+
+@pytest.mark.django_db
+def test_channel_filter_update_invalid_notification_backends(
+ make_organization_and_user_with_plugin_token,
+ make_alert_receive_channel,
+ make_channel_filter,
+ make_user_auth_headers,
+):
+ organization, user, token = make_organization_and_user_with_plugin_token()
+ alert_receive_channel = make_alert_receive_channel(organization)
+ extra_notification_backends = {"INVALID": {"channel_id": "abc123"}}
+ channel_filter = make_channel_filter(alert_receive_channel)
+
+ client = APIClient()
+
+ url = reverse("api-internal:channel_filter-detail", kwargs={"pk": channel_filter.public_primary_key})
+ data_for_update = {
+ "notification_backends": extra_notification_backends,
+ }
+
+ response = client.put(url, data=data_for_update, format="json", **make_user_auth_headers(user, token))
+
+ channel_filter.refresh_from_db()
+
+ assert response.status_code == status.HTTP_400_BAD_REQUEST
+ assert response.json() == {"notification_backends": ["Invalid messaging backend"]}
+ assert channel_filter.notification_backends is None
diff --git a/engine/apps/api/tests/test_custom_button.py b/engine/apps/api/tests/test_custom_button.py
new file mode 100644
index 0000000000..3c358c90f3
--- /dev/null
+++ b/engine/apps/api/tests/test_custom_button.py
@@ -0,0 +1,379 @@
+import json
+from unittest.mock import patch
+
+import pytest
+from django.urls import reverse
+from rest_framework import status
+from rest_framework.response import Response
+from rest_framework.test import APIClient
+
+from apps.alerts.models import CustomButton
+from common.constants.role import Role
+
+TEST_URL = "https://amixr.io"
+
+
+@pytest.fixture()
+def custom_button_internal_api_setup(make_organization_and_user_with_plugin_token, make_custom_action):
+ organization, user, token = make_organization_and_user_with_plugin_token()
+ custom_button = make_custom_action(
+ name="github_button",
+ webhook="https://github.com/",
+ user="Chris Vanstras",
+ password="qwerty",
+ data='{"name": "{{ alert_payload }}"}',
+ authorization_header="auth_token",
+ organization=organization,
+ )
+ return user, token, custom_button
+
+
+@pytest.mark.django_db
+def test_get_list_custom_button(custom_button_internal_api_setup, make_user_auth_headers):
+ user, token, custom_button = custom_button_internal_api_setup
+ client = APIClient()
+ url = reverse("api-internal:custom_button-list")
+
+ expected_payload = [
+ {
+ "id": custom_button.public_primary_key,
+ "name": "github_button",
+ "team": None,
+ "webhook": "https://github.com/",
+ "data": '{"name": "{{ alert_payload }}"}',
+ "user": "Chris Vanstras",
+ "password": "qwerty",
+ "authorization_header": "auth_token",
+ "forward_whole_payload": False,
+ }
+ ]
+
+ response = client.get(url, format="json", **make_user_auth_headers(user, token))
+ assert response.status_code == status.HTTP_200_OK
+ assert response.json() == expected_payload
+
+
+@pytest.mark.django_db
+def test_get_detail_custom_button(custom_button_internal_api_setup, make_user_auth_headers):
+ user, token, custom_button = custom_button_internal_api_setup
+ client = APIClient()
+ url = reverse("api-internal:custom_button-detail", kwargs={"pk": custom_button.public_primary_key})
+
+ expected_payload = {
+ "id": custom_button.public_primary_key,
+ "name": "github_button",
+ "team": None,
+ "webhook": "https://github.com/",
+ "data": '{"name": "{{ alert_payload }}"}',
+ "user": "Chris Vanstras",
+ "password": "qwerty",
+ "authorization_header": "auth_token",
+ "forward_whole_payload": False,
+ }
+
+ response = client.get(url, format="json", **make_user_auth_headers(user, token))
+ assert response.status_code == status.HTTP_200_OK
+ assert response.json() == expected_payload
+
+
+@pytest.mark.django_db
+def test_create_custom_button(custom_button_internal_api_setup, make_user_auth_headers):
+ user, token, custom_button = custom_button_internal_api_setup
+ client = APIClient()
+ url = reverse("api-internal:custom_button-list")
+
+ data = {
+ "name": "amixr_button",
+ "webhook": TEST_URL,
+ "team": None,
+ }
+ response = client.post(url, data, format="json", **make_user_auth_headers(user, token))
+ custom_button = CustomButton.objects.get(public_primary_key=response.data["id"])
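+ # the response echoes the request data plus the generated id and
+ # None/False defaults for the optional fields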
+ expected_response = data | {
+ "id": custom_button.public_primary_key,
+ "user": None,
+ "password": None,
+ "data": None,
+ "authorization_header": None,
+ "forward_whole_payload": False,
+ }
+ assert response.status_code == status.HTTP_201_CREATED
+ assert response.data == expected_response
+
+
+@pytest.mark.django_db
+def test_create_valid_data_button(custom_button_internal_api_setup, make_user_auth_headers):
+ user, token, custom_button = custom_button_internal_api_setup
+ client = APIClient()
+ url = reverse("api-internal:custom_button-list")
+
+ data = {
+ "name": "amixr_button_with_valid_data",
+ "webhook": TEST_URL,
+ "data": '{"name": "{{ alert_payload }}"}',
+ "team": None,
+ }
+
+ response = client.post(url, data, format="json", **make_user_auth_headers(user, token))
+ # modify initial data by adding id and None for optional fields
+ custom_button = CustomButton.objects.get(public_primary_key=response.data["id"])
+ expected_response = data | {
+ "id": custom_button.public_primary_key,
+ "user": None,
+ "password": None,
+ "authorization_header": None,
+ "forward_whole_payload": False,
+ }
+ assert response.status_code == status.HTTP_201_CREATED
+ assert response.json() == expected_response
+
+
+@pytest.mark.django_db
+def test_create_invalid_url_custom_button(custom_button_internal_api_setup, make_user_auth_headers):
+ user, token, custom_button = custom_button_internal_api_setup
+ client = APIClient()
+ url = reverse("api-internal:custom_button-list")
+
+ data = {
+ "name": "amixr_button_invalid_url",
+ "webhook": "invalid_url",
+ }
+ response = client.post(url, data, format="json", **make_user_auth_headers(user, token))
+ assert response.status_code == status.HTTP_400_BAD_REQUEST
+
+
+@pytest.mark.django_db
+def test_create_invalid_data_custom_button(custom_button_internal_api_setup, make_user_auth_headers):
+ user, token, custom_button = custom_button_internal_api_setup
+ client = APIClient()
+ url = reverse("api-internal:custom_button-list")
+
+ data = {
+ "name": "amixr_button_invalid_data",
+ "webhook": TEST_URL,
+ "data": "invalid_json",
+ }
+ response = client.post(url, data, format="json", **make_user_auth_headers(user, token))
+ assert response.status_code == status.HTTP_400_BAD_REQUEST
+
+
+@pytest.mark.django_db
+def test_update_custom_button(custom_button_internal_api_setup, make_user_auth_headers):
+ user, token, custom_button = custom_button_internal_api_setup
+ client = APIClient()
+ url = reverse("api-internal:custom_button-detail", kwargs={"pk": custom_button.public_primary_key})
+
+ data = {
+ "name": "github_button_updated",
+ "webhook": "https://github.com/",
+ "team": None,
+ }
+ response = client.put(
+ url, data=json.dumps(data), content_type="application/json", **make_user_auth_headers(user, token)
+ )
+ updated_instance = CustomButton.objects.get(public_primary_key=custom_button.public_primary_key)
+ assert response.status_code == status.HTTP_200_OK
+ assert updated_instance.name == "github_button_updated"
+
+
+@pytest.mark.django_db
+def test_delete_custom_button(custom_button_internal_api_setup, make_user_auth_headers):
+ user, token, custom_button = custom_button_internal_api_setup
+ client = APIClient()
+ url = reverse("api-internal:custom_button-detail", kwargs={"pk": custom_button.public_primary_key})
+
+ response = client.delete(url, **make_user_auth_headers(user, token))
+ assert response.status_code == status.HTTP_204_NO_CONTENT
+
+
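+# In the permission tests below the view action is patched out, so only the
+# authentication and DRF permission classes are exercised; the asserted status
+# therefore reflects role-based access alone.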
+@pytest.mark.django_db
+@pytest.mark.parametrize(
+ "role,expected_status",
+ [
+ (Role.ADMIN, status.HTTP_200_OK),
+ (Role.EDITOR, status.HTTP_403_FORBIDDEN),
+ (Role.VIEWER, status.HTTP_403_FORBIDDEN),
+ ],
+)
+def test_custom_button_create_permissions(
+ make_organization_and_user_with_plugin_token,
+ make_custom_action,
+ make_user_auth_headers,
+ role,
+ expected_status,
+):
+ _, user, token = make_organization_and_user_with_plugin_token(role)
+ client = APIClient()
+
+ url = reverse("api-internal:custom_button-list")
+
+ with patch(
+ "apps.api.views.custom_button.CustomButtonView.create",
+ return_value=Response(
+ status=status.HTTP_200_OK,
+ ),
+ ):
+ response = client.post(url, format="json", **make_user_auth_headers(user, token))
+
+ assert response.status_code == expected_status
+
+
+@pytest.mark.django_db
+@pytest.mark.parametrize(
+ "role,expected_status",
+ [
+ (Role.ADMIN, status.HTTP_200_OK),
+ (Role.EDITOR, status.HTTP_403_FORBIDDEN),
+ (Role.VIEWER, status.HTTP_403_FORBIDDEN),
+ ],
+)
+def test_custom_button_update_permissions(
+ make_organization_and_user_with_plugin_token,
+ make_custom_action,
+ make_user_auth_headers,
+ role,
+ expected_status,
+):
+ organization, user, token = make_organization_and_user_with_plugin_token(role)
+ custom_button = make_custom_action(organization=organization)
+ client = APIClient()
+
+ url = reverse("api-internal:custom_button-detail", kwargs={"pk": custom_button.public_primary_key})
+
+ with patch(
+ "apps.api.views.custom_button.CustomButtonView.update",
+ return_value=Response(
+ status=status.HTTP_200_OK,
+ ),
+ ):
+ response = client.put(url, format="json", **make_user_auth_headers(user, token))
+
+ assert response.status_code == expected_status
+
+ response = client.patch(url, format="json", **make_user_auth_headers(user, token))
+
+ assert response.status_code == expected_status
+
+
+@pytest.mark.django_db
+@pytest.mark.parametrize(
+ "role,expected_status",
+ [(Role.ADMIN, status.HTTP_200_OK), (Role.EDITOR, status.HTTP_200_OK), (Role.VIEWER, status.HTTP_200_OK)],
+)
+def test_custom_button_list_permissions(
+ make_organization_and_user_with_plugin_token,
+ make_custom_action,
+ make_user_auth_headers,
+ role,
+ expected_status,
+):
+ organization, user, token = make_organization_and_user_with_plugin_token(role)
+ make_custom_action(organization=organization)
+ client = APIClient()
+
+ url = reverse("api-internal:custom_button-list")
+
+ with patch(
+ "apps.api.views.custom_button.CustomButtonView.list",
+ return_value=Response(
+ status=status.HTTP_200_OK,
+ ),
+ ):
+ response = client.get(url, format="json", **make_user_auth_headers(user, token))
+
+ assert response.status_code == expected_status
+
+
+@pytest.mark.django_db
+@pytest.mark.parametrize(
+ "role,expected_status",
+ [(Role.ADMIN, status.HTTP_200_OK), (Role.EDITOR, status.HTTP_200_OK), (Role.VIEWER, status.HTTP_200_OK)],
+)
+def test_custom_button_retrieve_permissions(
+ make_organization_and_user_with_plugin_token,
+ make_custom_action,
+ make_user_auth_headers,
+ role,
+ expected_status,
+):
+ organization, user, token = make_organization_and_user_with_plugin_token(role)
+ custom_button = make_custom_action(organization=organization)
+ client = APIClient()
+
+ url = reverse("api-internal:custom_button-detail", kwargs={"pk": custom_button.public_primary_key})
+
+ with patch(
+ "apps.api.views.custom_button.CustomButtonView.retrieve",
+ return_value=Response(
+ status=status.HTTP_200_OK,
+ ),
+ ):
+ response = client.get(url, format="json", **make_user_auth_headers(user, token))
+
+ assert response.status_code == expected_status
+
+
+@pytest.mark.django_db
+@pytest.mark.parametrize(
+ "role,expected_status",
+ [
+ (Role.ADMIN, status.HTTP_204_NO_CONTENT),
+ (Role.EDITOR, status.HTTP_403_FORBIDDEN),
+ (Role.VIEWER, status.HTTP_403_FORBIDDEN),
+ ],
+)
+def test_custom_button_delete_permissions(
+ make_organization_and_user_with_plugin_token,
+ make_custom_action,
+ make_user_auth_headers,
+ role,
+ expected_status,
+):
+ organization, user, token = make_organization_and_user_with_plugin_token(role)
+ custom_button = make_custom_action(organization=organization)
+ client = APIClient()
+
+ url = reverse("api-internal:custom_button-detail", kwargs={"pk": custom_button.public_primary_key})
+
+ with patch(
+ "apps.api.views.custom_button.CustomButtonView.destroy",
+ return_value=Response(
+ status=status.HTTP_204_NO_CONTENT,
+ ),
+ ):
+ response = client.delete(url, format="json", **make_user_auth_headers(user, token))
+
+ assert response.status_code == expected_status
+
+
+@pytest.mark.django_db
+@pytest.mark.parametrize(
+ "role,expected_status",
+ [
+ (Role.ADMIN, status.HTTP_200_OK),
+ (Role.EDITOR, status.HTTP_200_OK),
+ (Role.VIEWER, status.HTTP_403_FORBIDDEN),
+ ],
+)
+def test_custom_button_action_permissions(
+ make_organization_and_user_with_plugin_token,
+ make_custom_action,
+ make_user_auth_headers,
+ role,
+ expected_status,
+):
+ organization, user, token = make_organization_and_user_with_plugin_token(role)
+ custom_button = make_custom_action(organization=organization)
+ client = APIClient()
+
+ url = reverse("api-internal:custom_button-action", kwargs={"pk": custom_button.public_primary_key})
+
+ with patch(
+ "apps.api.views.custom_button.CustomButtonView.action",
+ return_value=Response(
+ status=status.HTTP_200_OK,
+ ),
+ ):
+ response = client.post(url, format="json", **make_user_auth_headers(user, token))
+
+ assert response.status_code == expected_status
diff --git a/engine/apps/api/tests/test_escalation_chain.py b/engine/apps/api/tests/test_escalation_chain.py
new file mode 100644
index 0000000000..ee88fa1818
--- /dev/null
+++ b/engine/apps/api/tests/test_escalation_chain.py
@@ -0,0 +1,39 @@
+import json
+
+import pytest
+from django.urls import reverse
+from rest_framework import status
+from rest_framework.test import APIClient
+
+
+@pytest.fixture()
+def escalation_chain_internal_api_setup(make_organization_and_user_with_plugin_token, make_escalation_chain):
+ organization, user, token = make_organization_and_user_with_plugin_token()
+ escalation_chain = make_escalation_chain(organization)
+ return user, token, escalation_chain
+
+
+@pytest.mark.django_db
+def test_delete_escalation_chain(escalation_chain_internal_api_setup, make_user_auth_headers):
+ user, token, escalation_chain = escalation_chain_internal_api_setup
+ client = APIClient()
+ url = reverse("api-internal:escalation_chain-detail", kwargs={"pk": escalation_chain.public_primary_key})
+
+ response = client.delete(url, **make_user_auth_headers(user, token))
+ assert response.status_code == status.HTTP_204_NO_CONTENT
+
+
+@pytest.mark.django_db
+def test_update_escalation_chain(escalation_chain_internal_api_setup, make_user_auth_headers):
+ user, token, escalation_chain = escalation_chain_internal_api_setup
+ client = APIClient()
+ url = reverse("api-internal:escalation_chain-detail", kwargs={"pk": escalation_chain.public_primary_key})
+ data = {
+ "name": "escalation_chain_updated",
+ "organization": escalation_chain.organization.public_primary_key,
+ "team": None,
+ }
+ response = client.put(
+ url, data=json.dumps(data), content_type="application/json", **make_user_auth_headers(user, token)
+ )
+ assert response.status_code == status.HTTP_200_OK
diff --git a/engine/apps/api/tests/test_escalation_policy.py b/engine/apps/api/tests/test_escalation_policy.py
new file mode 100644
index 0000000000..f1e4c80456
--- /dev/null
+++ b/engine/apps/api/tests/test_escalation_policy.py
@@ -0,0 +1,866 @@
+from unittest.mock import patch
+
+import pytest
+from django.db.models import Max
+from django.urls import reverse
+from django.utils.timezone import timedelta
+from rest_framework import status
+from rest_framework.response import Response
+from rest_framework.test import APIClient
+
+from apps.alerts.models import EscalationPolicy
+from common.constants.role import Role
+
+
+@pytest.fixture()
+def escalation_policy_internal_api_setup(
+ make_organization_and_user_with_plugin_token,
+ make_escalation_chain,
+ make_user_for_organization,
+ make_escalation_policy,
+):
+ organization, first_user, token = make_organization_and_user_with_plugin_token()
+ second_user = make_user_for_organization(organization)
+
+ escalation_chain = make_escalation_chain(organization)
+ escalation_policy = make_escalation_policy(
+ escalation_chain=escalation_chain,
+ escalation_policy_step=EscalationPolicy.STEP_WAIT,
+ wait_delay=EscalationPolicy.ONE_MINUTE,
+ )
+ return token, escalation_chain, escalation_policy, first_user, second_user
+
+
+@pytest.mark.django_db
+def test_create_escalation_policy(escalation_policy_internal_api_setup, make_user_auth_headers):
+ token, escalation_chain, _, user, _ = escalation_policy_internal_api_setup
+ client = APIClient()
+ url = reverse("api-internal:escalation_policy-list")
+
+ data = {
+ "step": EscalationPolicy.STEP_WAIT,
+ "wait_delay": "60.0",
+ "escalation_chain": escalation_chain.public_primary_key,
+ "notify_to_users_queue": [],
+ "from_time": None,
+ "to_time": None,
+ }
+
+ max_order = EscalationPolicy.objects.filter(escalation_chain=escalation_chain).aggregate(maxorder=Max("order"))[
+ "maxorder"
+ ]
+
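+    # A new policy should be appended to the chain: its order is one past the current maximum.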
+ response = client.post(url, data, format="json", **make_user_auth_headers(user, token))
+ assert response.status_code == status.HTTP_201_CREATED
+ assert response.data["order"] == max_order + 1
+
+
+@pytest.mark.django_db
+def test_update_notify_multiple_users_step(escalation_policy_internal_api_setup, make_user_auth_headers):
+ token, _, escalation_policy, first_user, second_user = escalation_policy_internal_api_setup
+ client = APIClient()
+ url = reverse("api-internal:escalation_policy-detail", kwargs={"pk": escalation_policy.public_primary_key})
+
+ data = {
+ "step": EscalationPolicy.STEP_NOTIFY_MULTIPLE_USERS,
+ "notify_to_users_queue": [first_user.public_primary_key, second_user.public_primary_key],
+ }
+ response = client.put(url, data, format="json", **make_user_auth_headers(first_user, token))
+
+ assert response.status_code == status.HTTP_200_OK
+ assert response.json()["step"] == EscalationPolicy.STEP_NOTIFY_MULTIPLE_USERS
+ assert response.json()["notify_to_users_queue"] == [first_user.public_primary_key, second_user.public_primary_key]
+
+
+@pytest.mark.django_db
+def test_move_to_position(escalation_policy_internal_api_setup, make_user_auth_headers):
+ token, _, escalation_policy, user, _ = escalation_policy_internal_api_setup
+ client = APIClient()
+
+ position_to_move = 1
+ url = reverse(
+ "api-internal:escalation_policy-move-to-position", kwargs={"pk": escalation_policy.public_primary_key}
+ )
+ response = client.put(
+ f"{url}?position={position_to_move}", content_type="application/json", **make_user_auth_headers(user, token)
+ )
+ escalation_policy.refresh_from_db()
+ assert response.status_code == status.HTTP_200_OK
+ assert escalation_policy.order == position_to_move
+
+
+@pytest.mark.django_db
+@pytest.mark.parametrize(
+ "role,expected_status",
+ [
+ (Role.ADMIN, status.HTTP_200_OK),
+ (Role.EDITOR, status.HTTP_403_FORBIDDEN),
+ (Role.VIEWER, status.HTTP_403_FORBIDDEN),
+ ],
+)
+def test_escalation_policy_create_permissions(
+ make_organization_and_user_with_plugin_token,
+ make_escalation_chain,
+ make_escalation_policy,
+ make_user_auth_headers,
+ role,
+ expected_status,
+):
+ organization, user, token = make_organization_and_user_with_plugin_token(role)
+ escalation_chain = make_escalation_chain(organization)
+ make_escalation_policy(
+ escalation_chain, escalation_policy_step=EscalationPolicy.STEP_WAIT, wait_delay=EscalationPolicy.ONE_MINUTE
+ )
+ client = APIClient()
+
+ url = reverse("api-internal:escalation_policy-list")
+
+ with patch(
+ "apps.api.views.escalation_policy.EscalationPolicyView.create",
+ return_value=Response(
+ status=status.HTTP_200_OK,
+ ),
+ ):
+ response = client.post(url, format="json", **make_user_auth_headers(user, token))
+
+ assert response.status_code == expected_status
+
+
+@pytest.mark.django_db
+@pytest.mark.parametrize(
+ "role,expected_status",
+ [
+ (Role.ADMIN, status.HTTP_200_OK),
+ (Role.EDITOR, status.HTTP_403_FORBIDDEN),
+ (Role.VIEWER, status.HTTP_403_FORBIDDEN),
+ ],
+)
+def test_escalation_policy_update_permissions(
+ make_organization_and_user_with_plugin_token,
+ make_escalation_chain,
+ make_escalation_policy,
+ make_user_auth_headers,
+ role,
+ expected_status,
+):
+ organization, user, token = make_organization_and_user_with_plugin_token(role)
+ escalation_chain = make_escalation_chain(organization)
+ escalation_policy = make_escalation_policy(
+ escalation_chain, escalation_policy_step=EscalationPolicy.STEP_WAIT, wait_delay=EscalationPolicy.ONE_MINUTE
+ )
+ client = APIClient()
+
+ url = reverse("api-internal:escalation_policy-detail", kwargs={"pk": escalation_policy.public_primary_key})
+
+ with patch(
+ "apps.api.views.escalation_policy.EscalationPolicyView.update",
+ return_value=Response(
+ status=status.HTTP_200_OK,
+ ),
+ ):
+ response = client.put(url, format="json", **make_user_auth_headers(user, token))
+
+ assert response.status_code == expected_status
+
+ response = client.patch(url, format="json", **make_user_auth_headers(user, token))
+
+ assert response.status_code == expected_status
+
+
+@pytest.mark.django_db
+@pytest.mark.parametrize(
+ "role,expected_status",
+ [
+ (Role.ADMIN, status.HTTP_200_OK),
+ (Role.EDITOR, status.HTTP_200_OK),
+ (Role.VIEWER, status.HTTP_200_OK),
+ ],
+)
+def test_escalation_policy_list_permissions(
+ make_organization_and_user_with_plugin_token,
+ make_escalation_chain,
+ make_escalation_policy,
+ make_user_auth_headers,
+ role,
+ expected_status,
+):
+ organization, user, token = make_organization_and_user_with_plugin_token(role)
+ escalation_chain = make_escalation_chain(organization)
+ make_escalation_policy(
+ escalation_chain, escalation_policy_step=EscalationPolicy.STEP_WAIT, wait_delay=EscalationPolicy.ONE_MINUTE
+ )
+ client = APIClient()
+
+ url = reverse("api-internal:escalation_policy-list")
+
+ with patch(
+ "apps.api.views.escalation_policy.EscalationPolicyView.list",
+ return_value=Response(
+ status=status.HTTP_200_OK,
+ ),
+ ):
+ response = client.get(url, format="json", **make_user_auth_headers(user, token))
+
+ assert response.status_code == expected_status
+
+
+@pytest.mark.django_db
+@pytest.mark.parametrize(
+ "role,expected_status",
+ [
+ (Role.ADMIN, status.HTTP_200_OK),
+ (Role.EDITOR, status.HTTP_200_OK),
+ (Role.VIEWER, status.HTTP_200_OK),
+ ],
+)
+def test_escalation_policy_retrieve_permissions(
+ make_organization_and_user_with_plugin_token,
+ make_escalation_chain,
+ make_escalation_policy,
+ make_user_auth_headers,
+ role,
+ expected_status,
+):
+ organization, user, token = make_organization_and_user_with_plugin_token(role)
+ escalation_chain = make_escalation_chain(organization)
+ escalation_policy = make_escalation_policy(
+ escalation_chain, escalation_policy_step=EscalationPolicy.STEP_WAIT, wait_delay=EscalationPolicy.ONE_MINUTE
+ )
+ client = APIClient()
+
+ url = reverse("api-internal:escalation_policy-detail", kwargs={"pk": escalation_policy.public_primary_key})
+
+ with patch(
+ "apps.api.views.escalation_policy.EscalationPolicyView.retrieve",
+ return_value=Response(
+ status=status.HTTP_200_OK,
+ ),
+ ):
+ response = client.get(url, format="json", **make_user_auth_headers(user, token))
+
+ assert response.status_code == expected_status
+
+
+@pytest.mark.django_db
+@pytest.mark.parametrize(
+ "role,expected_status",
+ [
+ (Role.ADMIN, status.HTTP_204_NO_CONTENT),
+ (Role.EDITOR, status.HTTP_403_FORBIDDEN),
+ (Role.VIEWER, status.HTTP_403_FORBIDDEN),
+ ],
+)
+def test_escalation_policy_delete_permissions(
+ make_organization_and_user_with_plugin_token,
+ make_escalation_chain,
+ make_escalation_policy,
+ make_user_auth_headers,
+ role,
+ expected_status,
+):
+ organization, user, token = make_organization_and_user_with_plugin_token(role)
+ escalation_chain = make_escalation_chain(organization)
+ escalation_policy = make_escalation_policy(
+ escalation_chain, escalation_policy_step=EscalationPolicy.STEP_WAIT, wait_delay=EscalationPolicy.ONE_MINUTE
+ )
+ client = APIClient()
+
+ url = reverse("api-internal:escalation_policy-detail", kwargs={"pk": escalation_policy.public_primary_key})
+
+ with patch(
+ "apps.api.views.escalation_policy.EscalationPolicyView.destroy",
+ return_value=Response(
+ status=status.HTTP_204_NO_CONTENT,
+ ),
+ ):
+ response = client.delete(url, format="json", **make_user_auth_headers(user, token))
+
+ assert response.status_code == expected_status
+
+
+@pytest.mark.django_db
+@pytest.mark.parametrize(
+ "role,expected_status",
+ [
+ (Role.ADMIN, status.HTTP_200_OK),
+ (Role.EDITOR, status.HTTP_200_OK),
+ (Role.VIEWER, status.HTTP_200_OK),
+ ],
+)
+def test_escalation_policy_escalation_options_permissions(
+ make_organization_and_user_with_plugin_token,
+ make_escalation_chain,
+ make_escalation_policy,
+ make_user_auth_headers,
+ role,
+ expected_status,
+):
+ organization, user, token = make_organization_and_user_with_plugin_token(role)
+ escalation_chain = make_escalation_chain(organization)
+ make_escalation_policy(
+ escalation_chain, escalation_policy_step=EscalationPolicy.STEP_WAIT, wait_delay=EscalationPolicy.ONE_MINUTE
+ )
+ client = APIClient()
+
+ url = reverse("api-internal:escalation_policy-escalation-options")
+
+ with patch(
+ "apps.api.views.escalation_policy.EscalationPolicyView.escalation_options",
+ return_value=Response(
+ status=status.HTTP_200_OK,
+ ),
+ ):
+ response = client.get(url, format="json", **make_user_auth_headers(user, token))
+
+ assert response.status_code == expected_status
+
+
+@pytest.mark.django_db
+@pytest.mark.parametrize(
+ "role,expected_status",
+ [
+ (Role.ADMIN, status.HTTP_200_OK),
+ (Role.EDITOR, status.HTTP_200_OK),
+ (Role.VIEWER, status.HTTP_200_OK),
+ ],
+)
+def test_escalation_policy_delay_options_permissions(
+ make_organization_and_user_with_plugin_token,
+ make_escalation_chain,
+ make_escalation_policy,
+ make_user_auth_headers,
+ role,
+ expected_status,
+):
+ organization, user, token = make_organization_and_user_with_plugin_token(role)
+
+ escalation_chain = make_escalation_chain(organization)
+ make_escalation_policy(
+ escalation_chain, escalation_policy_step=EscalationPolicy.STEP_WAIT, wait_delay=EscalationPolicy.ONE_MINUTE
+ )
+ client = APIClient()
+
+ url = reverse("api-internal:escalation_policy-delay-options")
+
+ with patch(
+ "apps.api.views.escalation_policy.EscalationPolicyView.delay_options",
+ return_value=Response(
+ status=status.HTTP_200_OK,
+ ),
+ ):
+ response = client.get(url, format="json", **make_user_auth_headers(user, token))
+
+ assert response.status_code == expected_status
+
+
+@pytest.mark.django_db
+@pytest.mark.parametrize(
+ "role,expected_status",
+ [
+ (Role.ADMIN, status.HTTP_200_OK),
+ (Role.EDITOR, status.HTTP_200_OK),
+ (Role.VIEWER, status.HTTP_200_OK),
+ ],
+)
+def test_escalation_policy_move_to_position_permissions(
+ make_organization_and_user_with_plugin_token,
+ make_escalation_chain,
+ make_escalation_policy,
+ make_user_auth_headers,
+ role,
+ expected_status,
+):
+ organization, user, token = make_organization_and_user_with_plugin_token(role)
+
+ escalation_chain = make_escalation_chain(organization)
+ escalation_policy = make_escalation_policy(
+ escalation_chain, escalation_policy_step=EscalationPolicy.STEP_WAIT, wait_delay=EscalationPolicy.ONE_MINUTE
+ )
+ client = APIClient()
+
+ url = reverse("api-internal:escalation_policy-detail", kwargs={"pk": escalation_policy.public_primary_key})
+
+ with patch(
+ "apps.api.views.escalation_policy.EscalationPolicyView.move_to_position",
+ return_value=Response(
+ status=status.HTTP_200_OK,
+ ),
+ ):
+ response = client.get(url, format="json", **make_user_auth_headers(user, token))
+
+ assert response.status_code == expected_status
+
+
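+# *_IMPORTANT steps are stored internally; the internal API exposes each one as the
+# matching default step together with the important=True flag.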
+@pytest.mark.django_db
+@pytest.mark.parametrize(
+ "important_step ,expected_default_step",
+ [
+ (EscalationPolicy.STEP_NOTIFY_GROUP_IMPORTANT, EscalationPolicy.STEP_NOTIFY_GROUP),
+ (EscalationPolicy.STEP_NOTIFY_SCHEDULE_IMPORTANT, EscalationPolicy.STEP_NOTIFY_SCHEDULE),
+ (EscalationPolicy.STEP_NOTIFY_MULTIPLE_USERS_IMPORTANT, EscalationPolicy.STEP_NOTIFY_MULTIPLE_USERS),
+ ],
+)
+def test_escalation_policy_maps_default_to_important(
+ make_organization_and_user_with_plugin_token,
+ make_user_auth_headers,
+ make_escalation_chain,
+ make_escalation_policy,
+ important_step,
+ expected_default_step,
+):
+ organization, user, token = make_organization_and_user_with_plugin_token()
+
+ escalation_chain = make_escalation_chain(organization)
+ escalation_policy = make_escalation_policy(
+ escalation_chain,
+ escalation_policy_step=important_step,
+ )
+ client = APIClient()
+
+ url = reverse("api-internal:escalation_policy-detail", kwargs={"pk": escalation_policy.public_primary_key})
+
+ response = client.get(url, format="json", **make_user_auth_headers(user, token))
+
+ assert response.json()["step"] == expected_default_step
+ assert response.json()["important"] is True
+
+
+@pytest.mark.django_db
+@pytest.mark.parametrize(
+ "default_step",
+ [
+ EscalationPolicy.STEP_NOTIFY_GROUP,
+ EscalationPolicy.STEP_NOTIFY_SCHEDULE,
+ EscalationPolicy.STEP_NOTIFY_MULTIPLE_USERS,
+ ],
+)
+def test_escalation_policy_default_steps_stay_default(
+ make_organization_and_user_with_plugin_token,
+ make_escalation_chain,
+ make_escalation_policy,
+ default_step,
+ make_user_auth_headers,
+):
+ organization, user, token = make_organization_and_user_with_plugin_token()
+
+ escalation_chain = make_escalation_chain(organization)
+ escalation_policy = make_escalation_policy(
+ escalation_chain,
+ escalation_policy_step=default_step,
+ )
+ client = APIClient()
+
+ url = reverse("api-internal:escalation_policy-detail", kwargs={"pk": escalation_policy.public_primary_key})
+
+ response = client.get(url, format="json", **make_user_auth_headers(user, token))
+
+ assert response.json()["step"] == default_step
+ assert response.json()["important"] is False
+
+
+@pytest.mark.django_db
+@pytest.mark.parametrize(
+ "default_step ,expected_important_step",
+ [
+ (EscalationPolicy.STEP_NOTIFY_GROUP, EscalationPolicy.STEP_NOTIFY_GROUP_IMPORTANT),
+ (EscalationPolicy.STEP_NOTIFY_SCHEDULE, EscalationPolicy.STEP_NOTIFY_SCHEDULE_IMPORTANT),
+ (EscalationPolicy.STEP_NOTIFY_MULTIPLE_USERS, EscalationPolicy.STEP_NOTIFY_MULTIPLE_USERS_IMPORTANT),
+ ],
+)
+def test_create_escalation_policy_important(
+ make_organization_and_user_with_slack_identities,
+ make_token_for_organization,
+ make_escalation_chain,
+ default_step,
+ expected_important_step,
+ make_user_auth_headers,
+):
+ organization, user, _, _ = make_organization_and_user_with_slack_identities()
+ _, token = make_token_for_organization(organization)
+ escalation_chain = make_escalation_chain(organization)
+
+ client = APIClient()
+ data_for_creation = {
+ "escalation_chain": escalation_chain.public_primary_key,
+ "step": default_step,
+ "important": True,
+ }
+ url = reverse("api-internal:escalation_policy-list")
+
+ response = client.post(url, data=data_for_creation, format="json", **make_user_auth_headers(user, token))
+
+ assert response.status_code == status.HTTP_201_CREATED
+ public_primary_key = response.json()["id"]
+ created_escalation_policy = EscalationPolicy.objects.get(public_primary_key=public_primary_key)
+ assert created_escalation_policy.step == expected_important_step
+
+
+@pytest.mark.django_db
+@pytest.mark.parametrize(
+ "default_step",
+ [
+ EscalationPolicy.STEP_NOTIFY_GROUP,
+ EscalationPolicy.STEP_NOTIFY_SCHEDULE,
+ EscalationPolicy.STEP_NOTIFY_MULTIPLE_USERS,
+ ],
+)
+def test_create_escalation_policy_default(
+ make_organization_and_user_with_slack_identities,
+ make_token_for_organization,
+ make_escalation_chain,
+ default_step,
+ make_user_auth_headers,
+):
+ organization, user, _, _ = make_organization_and_user_with_slack_identities()
+ _, token = make_token_for_organization(organization)
+ escalation_chain = make_escalation_chain(organization)
+
+ client = APIClient()
+ data_for_creation = {
+ "escalation_chain": escalation_chain.public_primary_key,
+ "step": default_step,
+ "important": False,
+ }
+ url = reverse("api-internal:escalation_policy-list")
+
+ response = client.post(url, data=data_for_creation, format="json", **make_user_auth_headers(user, token))
+
+ assert response.status_code == status.HTTP_201_CREATED
+ public_primary_key = response.json()["id"]
+ created_escalation_policy = EscalationPolicy.objects.get(public_primary_key=public_primary_key)
+ assert created_escalation_policy.step == default_step
+
+
+@pytest.mark.django_db
+@pytest.mark.parametrize("step", EscalationPolicy.STEPS_WITH_NO_IMPORTANT_VERSION_SET)
+def test_create_escalation_policy_with_no_important_version(
+ make_organization_and_user_with_slack_identities,
+ make_token_for_organization,
+ make_escalation_chain,
+ step,
+ make_user_auth_headers,
+):
+ organization, user, _, _ = make_organization_and_user_with_slack_identities()
+ _, token = make_token_for_organization(organization)
+ escalation_chain = make_escalation_chain(organization)
+
+ client = APIClient()
+ data_for_creation = {
+ "escalation_chain": escalation_chain.public_primary_key,
+ "step": step,
+ }
+ url = reverse("api-internal:escalation_policy-list")
+
+ response = client.post(url, data=data_for_creation, format="json", **make_user_auth_headers(user, token))
+
+ assert response.status_code == status.HTTP_201_CREATED
+ public_primary_key = response.json()["id"]
+ created_escalation_policy = EscalationPolicy.objects.get(public_primary_key=public_primary_key)
+ assert created_escalation_policy.step == step
+
+
+@pytest.mark.django_db
+@pytest.mark.parametrize("step", EscalationPolicy.STEPS_WITH_NO_IMPORTANT_VERSION_SET)
+def test_escalation_policy_can_not_create_invalid_important_step(
+ make_organization_and_user_with_slack_identities,
+ make_token_for_organization,
+ make_escalation_chain,
+ step,
+ make_user_auth_headers,
+):
+ organization, user, _, _ = make_organization_and_user_with_slack_identities()
+ _, token = make_token_for_organization(organization)
+ escalation_chain = make_escalation_chain(organization)
+
+ client = APIClient()
+ data_for_creation = {"escalation_chain": escalation_chain.public_primary_key, "step": step, "important": True}
+ url = reverse("api-internal:escalation_policy-list")
+
+ response = client.post(url, data=data_for_creation, format="json", **make_user_auth_headers(user, token))
+
+ assert response.status_code == status.HTTP_400_BAD_REQUEST
+
+
+@pytest.mark.django_db
+@pytest.mark.parametrize("step", EscalationPolicy.INTERNAL_API_STEPS)
+def test_escalation_policy_can_not_create_with_non_step_type_related_data(
+ make_organization_and_user_with_slack_identities,
+ make_token_for_organization,
+ make_escalation_chain,
+ step,
+ make_user_auth_headers,
+):
+ organization, user, _, _ = make_organization_and_user_with_slack_identities()
+ _, token = make_token_for_organization(organization)
+
+ escalation_chain = make_escalation_chain(organization)
+
+ client = APIClient()
+ data_for_creation = {
+ "escalation_chain": escalation_chain.public_primary_key,
+ "step": step,
+ "notify_to_users_queue": [user.public_primary_key],
+ "wait_delay": "300.0",
+ "from_time": "06:50:00",
+ "to_time": "04:10:00",
+ }
+ url = reverse("api-internal:escalation_policy-list")
+
+ response = client.post(url, data=data_for_creation, format="json", **make_user_auth_headers(user, token))
+
+ assert response.status_code == status.HTTP_400_BAD_REQUEST
+
+
+@pytest.mark.django_db
+@pytest.mark.parametrize(
+ "step, related_fields",
+ [
+ (EscalationPolicy.STEP_WAIT, ["wait_delay"]),
+ (EscalationPolicy.STEP_FINAL_NOTIFYALL, []),
+ (EscalationPolicy.STEP_FINAL_RESOLVE, []),
+ (EscalationPolicy.STEP_NOTIFY_GROUP, ["notify_to_group"]),
+ (EscalationPolicy.STEP_NOTIFY_SCHEDULE, ["notify_schedule"]),
+ (EscalationPolicy.STEP_NOTIFY_USERS_QUEUE, ["notify_to_users_queue"]),
+ (EscalationPolicy.STEP_NOTIFY_IF_TIME, ["from_time", "to_time"]),
+ (EscalationPolicy.STEP_NOTIFY_MULTIPLE_USERS, ["notify_to_users_queue"]),
+ (EscalationPolicy.STEP_TRIGGER_CUSTOM_BUTTON, ["custom_button_trigger"]),
+ ],
+)
+def test_escalation_policy_update_drop_non_step_type_related_data(
+ make_organization_and_user_with_slack_identities,
+ make_token_for_organization,
+ make_escalation_chain,
+ make_escalation_policy,
+ step,
+ related_fields,
+ make_user_auth_headers,
+):
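+    # Changing the step should clear every step-specific field that does not apply to the new step.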
+ organization, user, _, _ = make_organization_and_user_with_slack_identities()
+ _, token = make_token_for_organization(organization)
+
+ escalation_chain = make_escalation_chain(organization)
+
+ data_for_creation = {
+ "wait_delay": timedelta(minutes=5),
+ "from_time": "06:50:00",
+ "to_time": "04:10:00",
+ }
+
+ escalation_policy = make_escalation_policy(
+ escalation_chain=escalation_chain, escalation_policy_step=EscalationPolicy.STEP_WAIT, **data_for_creation
+ )
+
+ escalation_policy.notify_to_users_queue.set([user])
+
+ data_for_update = {"step": step}
+
+ fields_to_check = [
+ "wait_delay",
+ "notify_schedule",
+ "notify_to_users_queue",
+ "notify_to_group",
+ "from_time",
+ "to_time",
+ "custom_button_trigger",
+ ]
+ for f in related_fields:
+ fields_to_check.remove(f)
+
+ client = APIClient()
+
+ url = reverse("api-internal:escalation_policy-detail", kwargs={"pk": escalation_policy.public_primary_key})
+
+ response = client.put(url, data=data_for_update, format="json", **make_user_auth_headers(user, token))
+
+ assert response.status_code == status.HTTP_200_OK
+
+ escalation_policy.refresh_from_db()
+
+ for f in fields_to_check:
+ if f == "notify_to_users_queue":
+ assert len(list(getattr(escalation_policy, f).all())) == 0
+ else:
+ assert getattr(escalation_policy, f) is None
+
+
+@pytest.mark.django_db
+@pytest.mark.parametrize("step", EscalationPolicy.DEFAULT_STEPS_SET)
+def test_escalation_policy_switch_importance(
+ make_organization_and_user_with_slack_identities,
+ make_token_for_organization,
+ make_escalation_chain,
+ make_escalation_policy,
+ step,
+ make_user_auth_headers,
+):
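+    # PUT with important=True on a default step should be accepted; the response echoes
+    # the default step value together with important=True.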
+ organization, user, _, _ = make_organization_and_user_with_slack_identities()
+ _, token = make_token_for_organization(organization)
+ escalation_chain = make_escalation_chain(organization)
+
+ escalation_policy = make_escalation_policy(
+ escalation_chain=escalation_chain,
+ escalation_policy_step=step,
+ )
+ data_for_update = {
+ "id": escalation_policy.public_primary_key,
+ "step": escalation_policy.step,
+ "order": escalation_policy.order,
+ "escalation_chain": escalation_chain.public_primary_key,
+ "notify_to_users_queue": [],
+ "from_time": None,
+ "to_time": None,
+ "num_alerts_in_window": None,
+ "num_minutes_in_window": None,
+ "slack_integration_required": escalation_policy.slack_integration_required,
+ "custom_button_trigger": None,
+ "notify_schedule": None,
+ "notify_to_group": None,
+ "important": True,
+ "wait_delay": None,
+ }
+
+ client = APIClient()
+
+ url = reverse("api-internal:escalation_policy-detail", kwargs={"pk": escalation_policy.public_primary_key})
+
+ response = client.put(url, data=data_for_update, format="json", **make_user_auth_headers(user, token))
+
+ assert response.status_code == status.HTTP_200_OK
+
+ assert response.json() == data_for_update
+
+
+@pytest.mark.django_db
+def test_escalation_policy_filter_by_user(
+ make_organization_and_user_with_plugin_token,
+ make_user_for_organization,
+ make_escalation_chain,
+ make_escalation_policy,
+ make_user_auth_headers,
+):
+ organization, user, token = make_organization_and_user_with_plugin_token()
+ second_user = make_user_for_organization(organization)
+ escalation_chain = make_escalation_chain(organization)
+
+ client = APIClient()
+ escalation_policy_with_one_user = make_escalation_policy(
+ escalation_chain=escalation_chain,
+ escalation_policy_step=EscalationPolicy.STEP_NOTIFY_MULTIPLE_USERS,
+ )
+
+ escalation_policy_with_two_users = make_escalation_policy(
+ escalation_chain=escalation_chain,
+ escalation_policy_step=EscalationPolicy.STEP_NOTIFY_MULTIPLE_USERS,
+ )
+
+ escalation_policy_with_one_user.notify_to_users_queue.set([user])
+ escalation_policy_with_two_users.notify_to_users_queue.set([user, second_user])
+
+ expected_payload = [
+ {
+ "id": escalation_policy_with_one_user.public_primary_key,
+ "order": 0,
+ "step": 13,
+ "wait_delay": None,
+ "escalation_chain": escalation_chain.public_primary_key,
+ "notify_to_users_queue": [user.public_primary_key],
+ "from_time": None,
+ "to_time": None,
+ "num_alerts_in_window": None,
+ "num_minutes_in_window": None,
+ "slack_integration_required": False,
+ "custom_button_trigger": None,
+ "notify_schedule": None,
+ "notify_to_group": None,
+ "important": False,
+ },
+ {
+ "id": escalation_policy_with_two_users.public_primary_key,
+ "order": 1,
+ "step": 13,
+ "wait_delay": None,
+ "escalation_chain": escalation_chain.public_primary_key,
+ "notify_to_users_queue": [user.public_primary_key, second_user.public_primary_key],
+ "from_time": None,
+ "to_time": None,
+ "num_alerts_in_window": None,
+ "num_minutes_in_window": None,
+ "slack_integration_required": False,
+ "custom_button_trigger": None,
+ "notify_schedule": None,
+ "notify_to_group": None,
+ "important": False,
+ },
+ ]
+
+ url = reverse("api-internal:escalation_policy-list")
+
+ response = client.get(f"{url}?user={user.public_primary_key}", format="json", **make_user_auth_headers(user, token))
+
+ assert response.status_code == status.HTTP_200_OK
+
+ assert response.json() == expected_payload
+
+
+@pytest.mark.django_db
+def test_escalation_policy_filter_by_slack_channel(
+ make_organization_and_user_with_plugin_token,
+ make_user_auth_headers,
+ make_alert_receive_channel,
+ make_channel_filter,
+ make_escalation_chain,
+ make_slack_channel,
+ make_escalation_policy,
+):
+ organization, user, token = make_organization_and_user_with_plugin_token()
+ alert_receive_channel = make_alert_receive_channel(organization)
+ slack_channel = make_slack_channel(organization.slack_team_identity)
+ escalation_chain = make_escalation_chain(organization)
+ other_escalation_chain = make_escalation_chain(organization)
+ make_channel_filter(
+ alert_receive_channel,
+ escalation_chain=escalation_chain,
+ is_default=False,
+ slack_channel_id=slack_channel.slack_id,
+ )
+
+ client = APIClient()
+
+ make_escalation_policy(
+ escalation_chain=other_escalation_chain,
+ escalation_policy_step=EscalationPolicy.STEP_WAIT,
+ )
+
+ escalation_policy_from_alert_receive_channel_with_slack_channel = make_escalation_policy(
+ escalation_chain=escalation_chain,
+ escalation_policy_step=EscalationPolicy.STEP_WAIT,
+ )
+ expected_payload = [
+ {
+ "id": escalation_policy_from_alert_receive_channel_with_slack_channel.public_primary_key,
+ "order": 0,
+ "step": 0,
+ "wait_delay": None,
+ "escalation_chain": escalation_chain.public_primary_key,
+ "notify_to_users_queue": [],
+ "from_time": None,
+ "to_time": None,
+ "num_alerts_in_window": None,
+ "num_minutes_in_window": None,
+ "slack_integration_required": False,
+ "custom_button_trigger": None,
+ "notify_schedule": None,
+ "notify_to_group": None,
+ "important": False,
+ },
+ ]
+
+ url = reverse("api-internal:escalation_policy-list")
+
+ response = client.get(
+ f"{url}?slack_channel={slack_channel.slack_id}", format="json", **make_user_auth_headers(user, token)
+ )
+
+ assert response.status_code == status.HTTP_200_OK
+
+ assert response.json() == expected_payload
diff --git a/engine/apps/api/tests/test_features.py b/engine/apps/api/tests/test_features.py
new file mode 100644
index 0000000000..e391b8fbc3
--- /dev/null
+++ b/engine/apps/api/tests/test_features.py
@@ -0,0 +1,59 @@
+import pytest
+from django.urls import reverse
+from rest_framework import status
+from rest_framework.test import APIClient
+
+from apps.api.views.features import FEATURE_LIVE_SETTINGS, FEATURE_SLACK, FEATURE_TELEGRAM
+
+
+@pytest.mark.django_db
+def test_features(
+ make_organization_and_user_with_plugin_token,
+ make_user_auth_headers,
+):
+ """
+    Check that an authenticated user can list the enabled features.
+ """
+ organization, user, token = make_organization_and_user_with_plugin_token()
+ client = APIClient()
+ url = reverse("api-internal:features")
+ response = client.get(url, format="json", **make_user_auth_headers(user, token))
+
+ assert response.status_code == status.HTTP_200_OK
+ assert isinstance(response.json(), list)
+
+
+@pytest.mark.django_db
+def test_select_features_all_enabled(
+ settings,
+ make_organization_and_user_with_plugin_token,
+ make_user_auth_headers,
+):
+ organization, user, token = make_organization_and_user_with_plugin_token()
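+    # Each feature flag enabled in settings should surface its feature name in the response.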
+ settings.FEATURE_SLACK_INTEGRATION_ENABLED = True
+ settings.FEATURE_TELEGRAM_INTEGRATION_ENABLED = True
+ settings.FEATURE_LIVE_SETTINGS_ENABLED = True
+ client = APIClient()
+ url = reverse("api-internal:features")
+ response = client.get(url, format="json", **make_user_auth_headers(user, token))
+
+ assert response.status_code == status.HTTP_200_OK
+ assert response.json() == [FEATURE_SLACK, FEATURE_TELEGRAM, FEATURE_LIVE_SETTINGS]
+
+
+@pytest.mark.django_db
+def test_select_features_all_disabled(
+ settings,
+ make_organization_and_user_with_plugin_token,
+ make_user_auth_headers,
+):
+ organization, user, token = make_organization_and_user_with_plugin_token()
+ settings.FEATURE_SLACK_INTEGRATION_ENABLED = False
+ settings.FEATURE_TELEGRAM_INTEGRATION_ENABLED = False
+ settings.FEATURE_LIVE_SETTINGS_ENABLED = False
+ client = APIClient()
+ url = reverse("api-internal:features")
+ response = client.get(url, format="json", **make_user_auth_headers(user, token))
+
+ assert response.status_code == status.HTTP_200_OK
+ assert response.json() == []
diff --git a/engine/apps/api/tests/test_gitops.py b/engine/apps/api/tests/test_gitops.py
new file mode 100644
index 0000000000..ca19643308
--- /dev/null
+++ b/engine/apps/api/tests/test_gitops.py
@@ -0,0 +1,56 @@
+import pytest
+from django.urls import reverse
+from rest_framework import status
+from rest_framework.test import APIClient
+
+from common.constants.role import Role
+
+
+@pytest.mark.django_db
+@pytest.mark.parametrize(
+ "role,expected_status",
+ [
+ (Role.ADMIN, status.HTTP_200_OK),
+ (Role.EDITOR, status.HTTP_200_OK),
+ (Role.VIEWER, status.HTTP_200_OK),
+ ],
+)
+def test_terraform_gitops_permissions(
+ make_organization_and_user_with_plugin_token,
+ make_escalation_chain,
+ make_user_auth_headers,
+ role,
+ expected_status,
+):
+ organization, user, token = make_organization_and_user_with_plugin_token(role)
+ make_escalation_chain(organization)
+
+ client = APIClient()
+
+ url = reverse("api-internal:terraform_file")
+
+ response = client.get(url, format="json", **make_user_auth_headers(user, token))
+
+ assert response.status_code == expected_status
+
+
+@pytest.mark.django_db
+@pytest.mark.parametrize(
+ "role,expected_status",
+ [
+ (Role.ADMIN, status.HTTP_200_OK),
+ (Role.EDITOR, status.HTTP_200_OK),
+ (Role.VIEWER, status.HTTP_200_OK),
+ ],
+)
+def test_terraform_state_permissions(
+ make_organization_and_user_with_plugin_token, make_user_auth_headers, role, expected_status
+):
+ _, user, token = make_organization_and_user_with_plugin_token(role)
+ client = APIClient()
+
+ url = reverse("api-internal:terraform_imports")
+
+ response = client.get(url, format="json", **make_user_auth_headers(user, token))
+
+ assert response.status_code == expected_status
diff --git a/engine/apps/api/tests/test_integration_heartbeat.py b/engine/apps/api/tests/test_integration_heartbeat.py
new file mode 100644
index 0000000000..048b512184
--- /dev/null
+++ b/engine/apps/api/tests/test_integration_heartbeat.py
@@ -0,0 +1,286 @@
+import json
+from unittest.mock import patch
+
+import pytest
+from django.urls import reverse
+from django.utils import timezone
+from rest_framework import status
+from rest_framework.response import Response
+from rest_framework.test import APIClient
+
+from apps.heartbeat.models import IntegrationHeartBeat
+from common.constants.role import Role
+
+MOCK_LAST_HEARTBEAT_TIME_VERBAL = "a moment"
+
+
+@pytest.fixture()
+def integration_heartbeat_internal_api_setup(
+ make_organization_and_user_with_plugin_token, make_alert_receive_channel, make_integration_heartbeat
+):
+ organization, user, token = make_organization_and_user_with_plugin_token()
+ alert_receive_channel = make_alert_receive_channel(organization)
+ now = timezone.now()
+ integration_heartbeat = make_integration_heartbeat(alert_receive_channel, last_heartbeat_time=now)
+ return user, token, alert_receive_channel, integration_heartbeat
+
+
+@pytest.mark.django_db
+@patch(
+ "apps.api.serializers.integration_heartbeat.IntegrationHeartBeatSerializer.get_instruction",
+ return_value="Grafana instruction
",
+)
+@patch(
+ "apps.api.serializers.integration_heartbeat.IntegrationHeartBeatSerializer._last_heartbeat_time_verbal",
+ return_value=MOCK_LAST_HEARTBEAT_TIME_VERBAL,
+)
+def test_get_list_integration_heartbeat(
+ mocked_verbal,
+ mocked_instruction,
+ integration_heartbeat_internal_api_setup,
+ make_user_auth_headers,
+):
+ user, token, alert_receive_channel, integration_heartbeat = integration_heartbeat_internal_api_setup
+ client = APIClient()
+ url = reverse("api-internal:integration_heartbeat-list")
+
+ expected_payload = [
+ {
+ "id": integration_heartbeat.public_primary_key,
+ "last_heartbeat_time_verbal": mocked_verbal.return_value,
+ "alert_receive_channel": alert_receive_channel.public_primary_key,
+ "link": integration_heartbeat.link,
+ "timeout_seconds": 60,
+ "status": True,
+ "instruction": mocked_instruction.return_value,
+ }
+ ]
+
+ response = client.get(url, format="json", **make_user_auth_headers(user, token))
+ assert response.status_code == status.HTTP_200_OK
+ assert response.json() == expected_payload
+
+
+@pytest.mark.django_db
+@patch(
+ "apps.api.serializers.integration_heartbeat.IntegrationHeartBeatSerializer.get_instruction",
+ return_value="
Grafana instruction
",
+)
+@patch(
+ "apps.api.serializers.integration_heartbeat.IntegrationHeartBeatSerializer._last_heartbeat_time_verbal",
+ return_value=MOCK_LAST_HEARTBEAT_TIME_VERBAL,
+)
+def test_get_detail_integration_heartbeat(
+ mocked_verbal,
+ mocked_instruction,
+ integration_heartbeat_internal_api_setup,
+ make_user_auth_headers,
+):
+ user, token, alert_receive_channel, integration_heartbeat = integration_heartbeat_internal_api_setup
+ client = APIClient()
+ url = reverse("api-internal:integration_heartbeat-detail", kwargs={"pk": integration_heartbeat.public_primary_key})
+
+ expected_payload = {
+ "id": integration_heartbeat.public_primary_key,
+ "last_heartbeat_time_verbal": mocked_verbal.return_value,
+ "alert_receive_channel": alert_receive_channel.public_primary_key,
+ "link": integration_heartbeat.link,
+ "timeout_seconds": 60,
+ "status": True,
+ "instruction": mocked_instruction.return_value,
+ }
+
+ response = client.get(url, format="json", **make_user_auth_headers(user, token))
+ assert response.status_code == status.HTTP_200_OK
+ assert response.json() == expected_payload
+
+
+@pytest.mark.django_db
+@patch(
+ "apps.api.serializers.integration_heartbeat.IntegrationHeartBeatSerializer.get_instruction",
+ return_value="
Grafana instruction
",
+)
+def test_create_integration_heartbeat(
+ mocked_instruction,
+ make_organization_and_user_with_plugin_token,
+ make_alert_receive_channel,
+ make_user_auth_headers,
+):
+ organization, user, token = make_organization_and_user_with_plugin_token()
+ alert_receive_channel = make_alert_receive_channel(organization)
+ client = APIClient()
+ url = reverse("api-internal:integration_heartbeat-list")
+
+ data_for_create = {"alert_receive_channel": alert_receive_channel.public_primary_key, "timeout_seconds": 60}
+ response = client.post(url, data_for_create, format="json", **make_user_auth_headers(user, token))
+
+ integration_heartbeat = IntegrationHeartBeat.objects.get(public_primary_key=response.data["id"])
+
+ expected_payload = {
+ "id": integration_heartbeat.public_primary_key,
+ "alert_receive_channel": alert_receive_channel.public_primary_key,
+ "last_heartbeat_time_verbal": None,
+ "timeout_seconds": 60,
+ "link": integration_heartbeat.link,
+ "status": False,
+ "instruction": mocked_instruction.return_value,
+ }
+ assert response.status_code == status.HTTP_201_CREATED
+ assert response.json() == expected_payload
+
+
+@pytest.mark.django_db
+def test_create_invalid_timeout_integration_heartbeat(
+ make_organization_and_user_with_plugin_token,
+ make_alert_receive_channel,
+ make_user_auth_headers,
+):
+ organization, user, token = make_organization_and_user_with_plugin_token()
+ alert_receive_channel = make_alert_receive_channel(organization)
+ client = APIClient()
+ url = reverse("api-internal:integration_heartbeat-list")
+
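+    # 71 is not one of the allowed heartbeat timeout choices, so validation should fail.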
+ data_for_create = {"alert_receive_channel": alert_receive_channel.public_primary_key, "timeout_seconds": 71}
+ response = client.post(url, data_for_create, format="json", **make_user_auth_headers(user, token))
+
+ assert response.status_code == status.HTTP_400_BAD_REQUEST
+
+
+@pytest.mark.django_db
+def test_create_empty_alert_receive_channel_integration_heartbeat(
+ integration_heartbeat_internal_api_setup,
+ make_user_auth_headers,
+):
+ user, token, alert_receive_channel, integration_heartbeat = integration_heartbeat_internal_api_setup
+ client = APIClient()
+ url = reverse("api-internal:integration_heartbeat-list")
+
+ data_for_create = {"timeout_seconds": 60}
+ response = client.post(url, data_for_create, format="json", **make_user_auth_headers(user, token))
+ assert response.status_code == status.HTTP_400_BAD_REQUEST
+
+
+@pytest.mark.django_db
+def test_update_integration_heartbeat(
+ integration_heartbeat_internal_api_setup,
+ make_user_auth_headers,
+):
+ user, token, alert_receive_channel, integration_heartbeat = integration_heartbeat_internal_api_setup
+ client = APIClient()
+ url = reverse("api-internal:integration_heartbeat-detail", kwargs={"pk": integration_heartbeat.public_primary_key})
+
+ data = {
+ "alert_receive_channel": alert_receive_channel.public_primary_key,
+ "timeout_seconds": 600,
+ }
+ response = client.put(
+ url, data=json.dumps(data), content_type="application/json", **make_user_auth_headers(user, token)
+ )
+ updated_instance = IntegrationHeartBeat.objects.get(public_primary_key=integration_heartbeat.public_primary_key)
+ assert response.status_code == status.HTTP_200_OK
+ assert updated_instance.timeout_seconds == 600
+
+
+@pytest.mark.django_db
+@pytest.mark.parametrize(
+ "role,expected_status",
+ [
+ (Role.ADMIN, status.HTTP_200_OK),
+ (Role.EDITOR, status.HTTP_403_FORBIDDEN),
+ (Role.VIEWER, status.HTTP_403_FORBIDDEN),
+ ],
+)
+def test_integration_heartbeat_update_permissions(
+ make_organization_and_user_with_plugin_token,
+ make_alert_receive_channel,
+ make_integration_heartbeat,
+ make_user_auth_headers,
+ role,
+ expected_status,
+):
+ organization, user, token = make_organization_and_user_with_plugin_token(role)
+ alert_receive_channel = make_alert_receive_channel(organization)
+ integration_heartbeat = make_integration_heartbeat(alert_receive_channel)
+ client = APIClient()
+
+ url = reverse("api-internal:integration_heartbeat-detail", kwargs={"pk": integration_heartbeat.public_primary_key})
+
+ with patch(
+ "apps.api.views.integration_heartbeat.IntegrationHeartBeatView.update",
+ return_value=Response(
+ status=status.HTTP_200_OK,
+ ),
+ ):
+ response = client.put(url, format="json", **make_user_auth_headers(user, token))
+
+ assert response.status_code == expected_status
+
+ response = client.patch(url, format="json", **make_user_auth_headers(user, token))
+
+ assert response.status_code == expected_status
+
+
+@pytest.mark.django_db
+@pytest.mark.parametrize(
+ "role,expected_status",
+ [(Role.ADMIN, status.HTTP_200_OK), (Role.EDITOR, status.HTTP_200_OK), (Role.VIEWER, status.HTTP_200_OK)],
+)
+def test_integration_heartbeat_list_permissions(
+ make_organization_and_user_with_plugin_token,
+ make_alert_receive_channel,
+ make_integration_heartbeat,
+ make_user_auth_headers,
+ role,
+ expected_status,
+):
+ organization, user, token = make_organization_and_user_with_plugin_token(role)
+ alert_receive_channel = make_alert_receive_channel(organization)
+ make_integration_heartbeat(alert_receive_channel)
+ client = APIClient()
+
+ url = reverse("api-internal:integration_heartbeat-list")
+
+ with patch(
+ "apps.api.views.integration_heartbeat.IntegrationHeartBeatView.list",
+ return_value=Response(
+ status=status.HTTP_200_OK,
+ ),
+ ):
+ response = client.get(url, format="json", **make_user_auth_headers(user, token))
+
+ assert response.status_code == expected_status
+
+
+@pytest.mark.django_db
+@pytest.mark.parametrize(
+ "role,expected_status",
+ [
+ (Role.ADMIN, status.HTTP_200_OK),
+ (Role.EDITOR, status.HTTP_200_OK),
+ (Role.VIEWER, status.HTTP_200_OK),
+ ],
+)
+def test_integration_heartbeat_retrieve_permissions(
+ make_organization_and_user_with_plugin_token,
+ make_alert_receive_channel,
+ make_integration_heartbeat,
+ make_user_auth_headers,
+ role,
+ expected_status,
+):
+ organization, user, token = make_organization_and_user_with_plugin_token(role)
+ alert_receive_channel = make_alert_receive_channel(organization)
+ integration_heartbeat = make_integration_heartbeat(alert_receive_channel)
+ client = APIClient()
+
+ url = reverse("api-internal:integration_heartbeat-detail", kwargs={"pk": integration_heartbeat.public_primary_key})
+
+ with patch(
+ "apps.api.views.integration_heartbeat.IntegrationHeartBeatView.retrieve",
+ return_value=Response(
+ status=status.HTTP_200_OK,
+ ),
+ ):
+ response = client.get(url, format="json", **make_user_auth_headers(user, token))
+
+ assert response.status_code == expected_status
diff --git a/engine/apps/api/tests/test_live_settings.py b/engine/apps/api/tests/test_live_settings.py
new file mode 100644
index 0000000000..1cfecebdeb
--- /dev/null
+++ b/engine/apps/api/tests/test_live_settings.py
@@ -0,0 +1,100 @@
+from unittest import mock
+
+import pytest
+from django.urls import reverse
+from rest_framework.status import HTTP_200_OK
+from rest_framework.test import APIClient
+
+
+@pytest.mark.django_db
+def test_list_live_setting(
+ make_organization_and_user_with_slack_identities,
+ make_user_auth_headers,
+ make_token_for_organization,
+ settings,
+):
+ settings.FEATURE_LIVE_SETTINGS_ENABLED = True
+
+ organization, user, _, _ = make_organization_and_user_with_slack_identities()
+ _, token = make_token_for_organization(organization)
+
+ client = APIClient()
+ url = reverse("api-internal:live_settings-list")
+
+ response = client.get(url, **make_user_auth_headers(user, token))
+
+ assert response.status_code == HTTP_200_OK
+
+
+@mock.patch("apps.slack.tasks.unpopulate_slack_user_identities.apply_async", return_value=None)
+@pytest.mark.django_db
+@pytest.mark.parametrize(
+ "setting_name",
+ [
+ "SLACK_CLIENT_OAUTH_ID",
+ "SLACK_CLIENT_OAUTH_SECRET",
+ ],
+)
+def test_live_settings_update_trigger_unpopulate_slack_identities(
+ mocked_unpopulate_task,
+ make_organization_and_user_with_slack_identities,
+ make_user_auth_headers,
+ make_token_for_organization,
+ make_live_setting,
+ settings,
+ setting_name,
+):
+ settings.FEATURE_LIVE_SETTINGS_ENABLED = True
+
+ organization, user, _, _ = make_organization_and_user_with_slack_identities()
+ _, token = make_token_for_organization(organization)
+ setattr(settings, setting_name, "default_setting_value")
+ client = APIClient()
+ live_setting = make_live_setting(name=setting_name, value="default_setting_value")
+ url = reverse("api-internal:live_settings-detail", kwargs={"pk": live_setting.public_primary_key})
+ data_to_put = {
+ "id": live_setting.public_primary_key,
+ "value": "987654321987.987654321987",
+ "name": setting_name,
+ }
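+    # The submitted value differs from the stored one, so the unpopulate task should be scheduled.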
+ response = client.put(url, data=data_to_put, format="json", **make_user_auth_headers(user, token))
+ assert mocked_unpopulate_task.called
+
+ assert response.status_code == HTTP_200_OK
+
+
+@mock.patch("apps.slack.tasks.unpopulate_slack_user_identities.apply_async", return_value=None)
+@pytest.mark.django_db
+@pytest.mark.parametrize(
+ "setting_name",
+ [
+ "SLACK_CLIENT_OAUTH_ID",
+ "SLACK_CLIENT_OAUTH_SECRET",
+ ],
+)
+def test_live_settings_update_not_trigger_unpopulate_slack_identities(
+ mocked_unpopulate_task,
+ make_organization_and_user_with_slack_identities,
+ make_user_auth_headers,
+ make_token_for_organization,
+ make_live_setting,
+ settings,
+ setting_name,
+):
+ settings.FEATURE_LIVE_SETTINGS_ENABLED = True
+
+ organization, user, _, _ = make_organization_and_user_with_slack_identities()
+ _, token = make_token_for_organization(organization)
+ setattr(settings, setting_name, "default_setting_value")
+ client = APIClient()
+ live_setting = make_live_setting(name=setting_name, value="default_setting_value")
+ url = reverse("api-internal:live_settings-detail", kwargs={"pk": live_setting.public_primary_key})
+ data_to_put = {
+ "id": live_setting.public_primary_key,
+ "value": "default_setting_value",
+ "name": setting_name,
+ }
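+    # The submitted value matches the stored one, so no unpopulate task should be scheduled.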
+ response = client.put(url, data=data_to_put, format="json", **make_user_auth_headers(user, token))
+ assert not mocked_unpopulate_task.called
+
+ assert response.status_code == HTTP_200_OK
diff --git a/engine/apps/api/tests/test_maintenance.py b/engine/apps/api/tests/test_maintenance.py
new file mode 100644
index 0000000000..dc140d671b
--- /dev/null
+++ b/engine/apps/api/tests/test_maintenance.py
@@ -0,0 +1,172 @@
+import pytest
+from django.urls import reverse
+from rest_framework import status
+from rest_framework.test import APIClient
+
+from apps.alerts.models import AlertReceiveChannel
+from apps.user_management.models import Organization
+
+
+@pytest.fixture()
+def maintenance_internal_api_setup(
+ make_organization_and_user_with_plugin_token,
+ make_escalation_chain,
+ make_alert_receive_channel,
+):
+ organization, user, token = make_organization_and_user_with_plugin_token()
+ make_escalation_chain(organization)
+ alert_receive_channel = make_alert_receive_channel(organization)
+ return token, organization, user, alert_receive_channel
+
+
+@pytest.mark.django_db
+def test_start_maintenance_integration(
+ maintenance_internal_api_setup, mock_start_disable_maintenance_task, make_user_auth_headers
+):
+ token, organization, user, alert_receive_channel = maintenance_internal_api_setup
+ client = APIClient()
+
+ url = reverse("api-internal:start_maintenance")
+ data = {
+ "mode": AlertReceiveChannel.MAINTENANCE,
+ "duration": AlertReceiveChannel.DURATION_ONE_HOUR.total_seconds(),
+ "type": "alert_receive_channel",
+ "alert_receive_channel_id": alert_receive_channel.public_primary_key,
+ }
+ response = client.post(url, data=data, format="json", **make_user_auth_headers(user, token))
+
+ alert_receive_channel.refresh_from_db()
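+    # Starting maintenance should persist the mode, duration, and audit fields on the channel.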
+ assert response.status_code == status.HTTP_200_OK
+ assert alert_receive_channel.maintenance_mode == AlertReceiveChannel.MAINTENANCE
+ assert alert_receive_channel.maintenance_duration == AlertReceiveChannel.DURATION_ONE_HOUR
+ assert alert_receive_channel.maintenance_uuid is not None
+ assert alert_receive_channel.maintenance_started_at is not None
+ assert alert_receive_channel.maintenance_author is not None
+
+
+@pytest.mark.django_db
+def test_stop_maintenance_integration(
+ maintenance_internal_api_setup,
+ mock_start_disable_maintenance_task,
+ make_user_auth_headers,
+):
+ token, organization, user, alert_receive_channel = maintenance_internal_api_setup
+ client = APIClient()
+ mode = AlertReceiveChannel.MAINTENANCE
+ duration = AlertReceiveChannel.DURATION_ONE_HOUR.seconds
+ alert_receive_channel.start_maintenance(mode, duration, user)
+ url = reverse("api-internal:stop_maintenance")
+ data = {
+ "type": "alert_receive_channel",
+ "alert_receive_channel_id": alert_receive_channel.public_primary_key,
+ }
+ response = client.post(url, data=data, format="json", **make_user_auth_headers(user, token))
+ alert_receive_channel.refresh_from_db()
+ assert response.status_code == status.HTTP_200_OK
+ assert alert_receive_channel.maintenance_mode is None
+ assert alert_receive_channel.maintenance_duration is None
+ assert alert_receive_channel.maintenance_uuid is None
+ assert alert_receive_channel.maintenance_started_at is None
+ assert alert_receive_channel.maintenance_author is None
+
+
+@pytest.mark.django_db
+def test_start_maintenance_organization(
+ maintenance_internal_api_setup,
+ mock_start_disable_maintenance_task,
+ make_user_auth_headers,
+):
+ token, organization, user, _ = maintenance_internal_api_setup
+ client = APIClient()
+
+ url = reverse("api-internal:start_maintenance")
+ data = {
+ "mode": Organization.MAINTENANCE,
+ "duration": Organization.DURATION_ONE_HOUR.total_seconds(),
+ "type": "organization",
+ }
+ response = client.post(url, data=data, format="json", **make_user_auth_headers(user, token))
+
+ organization.refresh_from_db()
+ assert response.status_code == status.HTTP_200_OK
+ assert organization.maintenance_mode == Organization.MAINTENANCE
+ assert organization.maintenance_duration == Organization.DURATION_ONE_HOUR
+ assert organization.maintenance_uuid is not None
+ assert organization.maintenance_started_at is not None
+ assert organization.maintenance_author is not None
+
+
+@pytest.mark.django_db
+def test_stop_maintenance_organization(
+ maintenance_internal_api_setup,
+ mock_start_disable_maintenance_task,
+ make_user_auth_headers,
+):
+ token, organization, user, _ = maintenance_internal_api_setup
+ client = APIClient()
+ mode = Organization.MAINTENANCE
+    duration = Organization.DURATION_ONE_HOUR.seconds
+ organization.start_maintenance(mode, duration, user)
+ url = reverse("api-internal:stop_maintenance")
+ data = {
+ "type": "organization",
+ }
+ response = client.post(url, data=data, format="json", **make_user_auth_headers(user, token))
+ organization.refresh_from_db()
+ assert response.status_code == status.HTTP_200_OK
+ assert organization.maintenance_mode is None
+ assert organization.maintenance_duration is None
+ assert organization.maintenance_uuid is None
+ assert organization.maintenance_started_at is None
+ assert organization.maintenance_author is None
+
+
+@pytest.mark.django_db
+def test_maintenances_list(
+ maintenance_internal_api_setup,
+ mock_start_disable_maintenance_task,
+ make_user_auth_headers,
+):
+ token, organization, user, alert_receive_channel = maintenance_internal_api_setup
+ client = APIClient()
+ mode = AlertReceiveChannel.MAINTENANCE
+ duration = AlertReceiveChannel.DURATION_ONE_HOUR.seconds
+ alert_receive_channel.start_maintenance(mode, duration, user)
+ organization.start_maintenance(mode, duration, user)
+ url = reverse("api-internal:maintenance")
+ response = client.get(url, format="json", **make_user_auth_headers(user, token))
+
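+    # Organization-wide maintenance is listed before integration maintenance;
+    # maintenance_mode serializes as the raw enum value (1 == MAINTENANCE here).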
+ expected_payload = [
+ {
+ "organization_id": organization.public_primary_key,
+ "type": "organization",
+ "maintenance_mode": 1,
+ "maintenance_till_timestamp": organization.till_maintenance_timestamp,
+ "started_at_timestamp": organization.started_at_timestamp,
+ },
+ {
+ "alert_receive_channel_id": alert_receive_channel.public_primary_key,
+ "type": "alert_receive_channel",
+ "maintenance_mode": 1,
+ "maintenance_till_timestamp": alert_receive_channel.till_maintenance_timestamp,
+ "started_at_timestamp": alert_receive_channel.started_at_timestamp,
+ },
+ ]
+
+ assert response.status_code == status.HTTP_200_OK
+ assert response.json() == expected_payload
+
+
+@pytest.mark.django_db
+def test_empty_maintenances_list(
+ maintenance_internal_api_setup, mock_start_disable_maintenance_task, make_user_auth_headers
+):
+ token, organization, user, alert_receive_channel = maintenance_internal_api_setup
+ client = APIClient()
+ url = reverse("api-internal:maintenance")
+ response = client.get(url, format="json", **make_user_auth_headers(user, token))
+
+ expected_payload = []
+ alert_receive_channel.refresh_from_db()
+ assert response.status_code == status.HTTP_200_OK
+ assert response.data == expected_payload
diff --git a/engine/apps/api/tests/test_organization.py b/engine/apps/api/tests/test_organization.py
new file mode 100644
index 0000000000..0b97701eac
--- /dev/null
+++ b/engine/apps/api/tests/test_organization.py
@@ -0,0 +1,190 @@
+from unittest.mock import patch
+
+import pytest
+from django.urls import reverse
+from rest_framework import status
+from rest_framework.response import Response
+from rest_framework.test import APIClient
+
+from common.constants.role import Role
+
+
+@pytest.mark.django_db
+@pytest.mark.parametrize(
+ "role,expected_status",
+ [
+ (Role.ADMIN, status.HTTP_200_OK),
+ (Role.EDITOR, status.HTTP_200_OK),
+ (Role.VIEWER, status.HTTP_200_OK),
+ ],
+)
+def test_current_team_retrieve_permissions(
+ make_organization,
+ make_user_for_organization,
+ make_token_for_organization,
+ make_user_auth_headers,
+ role,
+ expected_status,
+):
+ org = make_organization()
+ tester = make_user_for_organization(org, role=role)
+ _, token = make_token_for_organization(org)
+
+ client = APIClient()
+
+ url = reverse("api-internal:api-current-team")
+ with patch(
+ "apps.api.views.organization.CurrentOrganizationView.get",
+ return_value=Response(
+ status=status.HTTP_200_OK,
+ ),
+ ):
+ response = client.get(url, format="json", **make_user_auth_headers(tester, token))
+
+ assert response.status_code == expected_status
+
+
+@pytest.mark.django_db
+@pytest.mark.parametrize(
+ "role,expected_status",
+ [
+ (Role.ADMIN, status.HTTP_200_OK),
+ (Role.EDITOR, status.HTTP_403_FORBIDDEN),
+ (Role.VIEWER, status.HTTP_403_FORBIDDEN),
+ ],
+)
+def test_current_team_update_permissions(
+ make_organization,
+ make_user_for_organization,
+ make_token_for_organization,
+ make_user_auth_headers,
+ role,
+ expected_status,
+):
+ org = make_organization()
+ tester = make_user_for_organization(org, role=role)
+ _, token = make_token_for_organization(org)
+
+ client = APIClient()
+
+ url = reverse("api-internal:api-current-team")
+
+ with patch(
+ "apps.api.views.organization.CurrentOrganizationView.put",
+ return_value=Response(
+ status=status.HTTP_200_OK,
+ ),
+ ):
+ response = client.put(url, format="json", **make_user_auth_headers(tester, token))
+
+ assert response.status_code == expected_status
+
+
+@pytest.mark.django_db
+@pytest.mark.parametrize("feature_flag_enabled", [False, True])
+def test_current_team_messaging_backend_status(
+ settings,
+ make_organization,
+ make_user_for_organization,
+ make_token_for_organization,
+ make_user_auth_headers,
+ feature_flag_enabled,
+):
+ org = make_organization()
+ tester = make_user_for_organization(org, role=Role.ADMIN)
+ _, token = make_token_for_organization(org)
+
+ client = APIClient()
+
+ settings.FEATURE_EXTRA_MESSAGING_BACKENDS_ENABLED = feature_flag_enabled
+ url = reverse("api-internal:api-current-team")
+ response = client.get(url, format="json", **make_user_auth_headers(tester, token))
+
+ assert response.status_code == status.HTTP_200_OK
+ assert response.json()["env_status"]["extra_messaging_backends_enabled"] == bool(feature_flag_enabled)
+
+
+@pytest.mark.django_db
+@pytest.mark.parametrize(
+ "role,expected_status",
+ [
+ (Role.ADMIN, status.HTTP_200_OK),
+ (Role.EDITOR, status.HTTP_403_FORBIDDEN),
+ (Role.VIEWER, status.HTTP_403_FORBIDDEN),
+ ],
+)
+def test_current_team_get_telegram_verification_code_permissions(
+ make_organization_and_user_with_plugin_token,
+ make_user_auth_headers,
+ role,
+ expected_status,
+):
+ organization, tester, token = make_organization_and_user_with_plugin_token(role)
+
+ client = APIClient()
+
+ url = reverse("api-internal:api-get-telegram-verification-code")
+ response = client.get(url, format="json", **make_user_auth_headers(tester, token))
+
+ assert response.status_code == expected_status
+
+
+@pytest.mark.django_db
+@pytest.mark.parametrize(
+ "role,expected_status",
+ [
+ (Role.ADMIN, status.HTTP_200_OK),
+ (Role.EDITOR, status.HTTP_403_FORBIDDEN),
+ (Role.VIEWER, status.HTTP_403_FORBIDDEN),
+ ],
+)
+def test_current_team_get_channel_verification_code_permissions(
+ make_organization_and_user_with_plugin_token,
+ make_user_auth_headers,
+ role,
+ expected_status,
+):
+ organization, tester, token = make_organization_and_user_with_plugin_token(role)
+
+ client = APIClient()
+
+ url = reverse("api-internal:api-get-channel-verification-code") + "?backend=TESTONLY"
+ response = client.get(url, format="json", **make_user_auth_headers(tester, token))
+
+ assert response.status_code == expected_status
+
+
+@pytest.mark.django_db
+def test_current_team_get_channel_verification_code_ok(
+ make_organization_and_user_with_plugin_token,
+ make_user_auth_headers,
+):
+ organization, tester, token = make_organization_and_user_with_plugin_token(Role.ADMIN)
+
+ client = APIClient()
+
+ url = reverse("api-internal:api-get-channel-verification-code") + "?backend=TESTONLY"
+ with patch(
+ "apps.base.tests.messaging_backend.TestOnlyBackend.generate_channel_verification_code",
+ return_value="the-code",
+ ) as mock_generate_code:
+ response = client.get(url, format="json", **make_user_auth_headers(tester, token))
+
+ assert response.status_code == status.HTTP_200_OK
+ assert response.json() == "the-code"
+ mock_generate_code.assert_called_once_with(organization)
+
+
+@pytest.mark.django_db
+def test_current_team_get_channel_verification_code_invalid(
+ make_organization_and_user_with_plugin_token,
+ make_user_auth_headers,
+):
+ organization, tester, token = make_organization_and_user_with_plugin_token(Role.ADMIN)
+
+ client = APIClient()
+
+ url = reverse("api-internal:api-get-channel-verification-code") + "?backend=INVALID"
+ response = client.get(url, format="json", **make_user_auth_headers(tester, token))
+
+ assert response.status_code == status.HTTP_400_BAD_REQUEST
diff --git a/engine/apps/api/tests/test_organization_log_record.py b/engine/apps/api/tests/test_organization_log_record.py
new file mode 100644
index 0000000000..bf48368ba0
--- /dev/null
+++ b/engine/apps/api/tests/test_organization_log_record.py
@@ -0,0 +1,243 @@
+from unittest.mock import patch
+
+import pytest
+from django.urls import reverse
+from rest_framework import status
+from rest_framework.response import Response
+from rest_framework.test import APIClient
+
+from apps.base.models import OrganizationLogRecord
+from apps.user_management.organization_log_creator import OrganizationLogType
+from common.constants.role import Role
+
+
+@pytest.mark.django_db
+@pytest.mark.parametrize(
+ "role,expected_status",
+ [
+ (Role.ADMIN, status.HTTP_200_OK),
+ (Role.EDITOR, status.HTTP_200_OK),
+ (Role.VIEWER, status.HTTP_200_OK),
+ ],
+)
+def test_organization_log_records_permissions(
+ make_organization_and_user_with_plugin_token, make_user_auth_headers, role, expected_status
+):
+ _, user, token = make_organization_and_user_with_plugin_token(role)
+ client = APIClient()
+ url = reverse("api-internal:organization_log-list")
+
+ with patch(
+ "apps.api.views.organization_log_record.OrganizationLogRecordView.list",
+ return_value=Response(
+ status=status.HTTP_200_OK,
+ ),
+ ):
+ response = client.get(url, format="json", **make_user_auth_headers(user, token))
+ assert response.status_code == expected_status
+
+
+@pytest.mark.django_db
+@pytest.mark.parametrize(
+ "role,expected_status",
+ [
+ (Role.ADMIN, status.HTTP_200_OK),
+ (Role.EDITOR, status.HTTP_200_OK),
+ (Role.VIEWER, status.HTTP_200_OK),
+ ],
+)
+def test_organization_log_records_filters_permissions(
+ make_organization_and_user_with_plugin_token, make_user_auth_headers, role, expected_status
+):
+ _, user, token = make_organization_and_user_with_plugin_token(role)
+ client = APIClient()
+ url = reverse("api-internal:organization_log-filters")
+
+ with patch(
+ "apps.api.views.organization_log_record.OrganizationLogRecordView.filters",
+ return_value=Response(
+ status=status.HTTP_200_OK,
+ ),
+ ):
+ response = client.get(url, format="json", **make_user_auth_headers(user, token))
+ assert response.status_code == expected_status
+
+
+@pytest.mark.django_db
+@pytest.mark.parametrize(
+ "role,expected_status",
+ [
+ (Role.ADMIN, status.HTTP_200_OK),
+ (Role.EDITOR, status.HTTP_200_OK),
+ (Role.VIEWER, status.HTTP_200_OK),
+ ],
+)
+def test_organization_log_records_label_options_permissions(
+ make_organization_and_user_with_plugin_token, make_user_auth_headers, role, expected_status
+):
+ _, user, token = make_organization_and_user_with_plugin_token(role)
+ client = APIClient()
+ url = reverse("api-internal:organization_log-label-options")
+
+ with patch(
+ "apps.api.views.organization_log_record.OrganizationLogRecordView.label_options",
+ return_value=Response(
+ status=status.HTTP_200_OK,
+ ),
+ ):
+ response = client.get(url, format="json", **make_user_auth_headers(user, token))
+ assert response.status_code == expected_status
+
+
+@pytest.mark.django_db
+def test_get_filter_created_at(
+ make_organization_and_user_with_plugin_token,
+ make_organization_log_record,
+ make_user_auth_headers,
+):
+ organization, user, token = make_organization_and_user_with_plugin_token()
+ client = APIClient()
+ make_organization_log_record(organization, user)
+
+ url = reverse("api-internal:organization_log-list")
+ response = client.get(
+ url + "?created_at=1970-01-01T00:00:00/2099-01-01T23:59:59",
+ format="json",
+ **make_user_auth_headers(user, token),
+ )
+ assert response.status_code == status.HTTP_200_OK
+ assert len(response.data["results"]) == 1
+
+
+@pytest.mark.django_db
+def test_get_filter_created_at_empty_result(
+ make_organization_and_user_with_plugin_token,
+ make_organization_log_record,
+ make_user_auth_headers,
+):
+ organization, user, token = make_organization_and_user_with_plugin_token()
+ client = APIClient()
+ make_organization_log_record(organization, user)
+
+ url = reverse("api-internal:organization_log-list")
+ response = client.get(
+ f"{url}?created_at=1970-01-01T00:00:00/1970-01-01T23:59:59",
+ format="json",
+ **make_user_auth_headers(user, token),
+ )
+ assert response.status_code == status.HTTP_200_OK
+ assert len(response.data["results"]) == 0
+
+
+@pytest.mark.django_db
+def test_get_filter_created_at_invalid_format(
+ make_organization_and_user_with_plugin_token,
+ make_user_auth_headers,
+):
+ organization, user, token = make_organization_and_user_with_plugin_token()
+ client = APIClient()
+ url = reverse("api-internal:organization_log-list")
+ response = client.get(f"{url}?created_at=invalid_date_format", format="json", **make_user_auth_headers(user, token))
+ assert response.status_code == status.HTTP_400_BAD_REQUEST
+
+
+@pytest.mark.skip(reason="SQLITE Incompatibility")
+@pytest.mark.django_db
+def test_get_filter_by_labels(
+ make_organization_and_user_with_plugin_token,
+ make_organization_log_record,
+ make_user_auth_headers,
+):
+ organization, user, token = make_organization_and_user_with_plugin_token()
+ client = APIClient()
+
+ # create log that contains LABEL_SLACK and LABEL_DEFAULT_CHANNEL
+ make_organization_log_record(organization, user, type=OrganizationLogType.TYPE_SLACK_DEFAULT_CHANNEL_CHANGED)
+ # create log that contains LABEL_SLACK but does not contain LABEL_DEFAULT_CHANNEL
+ make_organization_log_record(organization, user, type=OrganizationLogType.TYPE_SLACK_WORKSPACE_DISCONNECTED)
+ # create log that does not contain labels from search
+ make_organization_log_record(organization, user, type=OrganizationLogType.TYPE_INTEGRATION_CREATED)
+
+ url = reverse("api-internal:organization_log-list")
+ # search by one label: LABEL_SLACK
+ response = client.get(
+ f"{url}?labels={OrganizationLogRecord.LABEL_SLACK}", format="json", **make_user_auth_headers(user, token)
+ )
+ assert response.status_code == status.HTTP_200_OK
+ assert len(response.data["results"]) == 2
+ response_log_labels = [log["labels"] for log in response.data["results"]]
+ for labels in response_log_labels:
+ assert OrganizationLogRecord.LABEL_SLACK in labels
+
+ # search by two labels: LABEL_SLACK and LABEL_DEFAULT_CHANNEL
+ response = client.get(
+ f"{url}?labels={OrganizationLogRecord.LABEL_SLACK}&labels={OrganizationLogRecord.LABEL_DEFAULT_CHANNEL}",
+ format="json",
+ **make_user_auth_headers(user, token),
+ )
+ assert response.status_code == status.HTTP_200_OK
+ assert len(response.data["results"]) == 1
+ response_log_labels = [log["labels"] for log in response.data["results"]]
+ for labels in response_log_labels:
+ assert OrganizationLogRecord.LABEL_SLACK in labels
+ assert OrganizationLogRecord.LABEL_DEFAULT_CHANNEL in labels
+
+
+@pytest.mark.django_db
+def test_get_filter_author(
+ make_organization_and_user_with_plugin_token,
+ make_user_for_organization,
+ make_organization_log_record,
+ make_user_auth_headers,
+):
+ client = APIClient()
+
+ organization, first_user, token = make_organization_and_user_with_plugin_token()
+ second_user = make_user_for_organization(organization)
+ make_organization_log_record(organization, first_user)
+
+ url = reverse("api-internal:organization_log-list")
+ first_response = client.get(
+ f"{url}?author={first_user.public_primary_key}", format="json", **make_user_auth_headers(first_user, token)
+ )
+ assert first_response.status_code == status.HTTP_200_OK
+ assert len(first_response.data["results"]) == 1
+
+ second_response = client.get(
+ f"{url}?author={second_user.public_primary_key}", format="json", **make_user_auth_headers(first_user, token)
+ )
+ assert second_response.status_code == status.HTTP_200_OK
+ assert len(second_response.data["results"]) == 0
+
+
+@pytest.mark.django_db
+def test_get_filter_author_multiple_values(
+ make_organization_and_user_with_plugin_token,
+ make_user_for_organization,
+ make_organization_log_record,
+ make_user_auth_headers,
+):
+ client = APIClient()
+
+ organization, first_user, token = make_organization_and_user_with_plugin_token()
+ second_user = make_user_for_organization(organization)
+ third_user = make_user_for_organization(organization)
+ make_organization_log_record(organization, first_user)
+ make_organization_log_record(organization, second_user)
+
+ url = reverse("api-internal:organization_log-list")
+ first_response = client.get(
+ f"{url}?author={first_user.public_primary_key}&author={second_user.public_primary_key}",
+ format="json",
+ **make_user_auth_headers(first_user, token),
+ )
+ assert first_response.status_code == status.HTTP_200_OK
+ assert len(first_response.data["results"]) == 2
+
+ second_response = client.get(
+ f"{url}?author={first_user.public_primary_key}&author={third_user.public_primary_key}",
+ format="json",
+ **make_user_auth_headers(first_user, token),
+ )
+ assert second_response.status_code == status.HTTP_200_OK
+ assert len(second_response.data["results"]) == 1
diff --git a/engine/apps/api/tests/test_postmortem_messages.py b/engine/apps/api/tests/test_postmortem_messages.py
new file mode 100644
index 0000000000..fe45ded0de
--- /dev/null
+++ b/engine/apps/api/tests/test_postmortem_messages.py
@@ -0,0 +1,395 @@
+from unittest.mock import patch
+
+import pytest
+from django.urls import reverse
+from rest_framework import status
+from rest_framework.response import Response
+from rest_framework.test import APIClient
+
+from apps.alerts.models import ResolutionNote
+from common.constants.role import Role
+
+
+@pytest.mark.django_db
+def test_create_resolution_note(
+ make_organization_and_user_with_plugin_token, make_user_auth_headers, make_alert_receive_channel, make_alert_group
+):
+ organization, user, token = make_organization_and_user_with_plugin_token()
+ client = APIClient()
+
+ alert_receive_channel = make_alert_receive_channel(organization)
+ alert_group = make_alert_group(alert_receive_channel)
+
+ url = reverse("api-internal:resolution_note-list")
+
+ data = {
+ "alert_group": alert_group.public_primary_key,
+ "text": "Test Message",
+ }
+
+ response = client.post(url, data=data, format="json", **make_user_auth_headers(user, token))
+
+ resolution_note = ResolutionNote.objects.get(public_primary_key=response.data["id"])
+
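+    # created_at is generated server-side, so the expected value is taken from the response.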
+ result = {
+ "id": resolution_note.public_primary_key,
+ "alert_group": alert_group.public_primary_key,
+ "source": {
+ "id": resolution_note.source,
+ "display_name": resolution_note.get_source_display(),
+ },
+ "author": {
+ "pk": user.public_primary_key,
+ "username": user.username,
+ },
+ "created_at": response.data["created_at"],
+ "text": data["text"],
+ }
+
+ assert response.status_code == status.HTTP_201_CREATED
+ assert response.data == result
+
+
+@pytest.mark.django_db
+def test_create_resolution_note_invalid_text(
+ make_organization_and_user_with_plugin_token,
+ make_user_auth_headers,
+ make_alert_receive_channel,
+ make_alert_group,
+):
+ organization, user, token = make_organization_and_user_with_plugin_token()
+ client = APIClient()
+
+ alert_receive_channel = make_alert_receive_channel(organization)
+ alert_group = make_alert_group(alert_receive_channel)
+
+ url = reverse("api-internal:resolution_note-list")
+
+ data = {
+ "alert_group": alert_group.public_primary_key,
+ "text": "",
+ }
+
+ response = client.post(url, data=data, format="json", **make_user_auth_headers(user, token))
+
+ assert response.status_code == status.HTTP_400_BAD_REQUEST
+ assert response.data["text"][0] == "This field may not be blank."
+
+
+@pytest.mark.django_db
+def test_update_resolution_note(
+ make_organization_and_user_with_plugin_token,
+ make_user_auth_headers,
+ make_alert_receive_channel,
+ make_alert_group,
+ make_resolution_note,
+):
+ organization, user, token = make_organization_and_user_with_plugin_token()
+ client = APIClient()
+
+ alert_receive_channel = make_alert_receive_channel(organization)
+ alert_group = make_alert_group(alert_receive_channel)
+
+ resolution_note = make_resolution_note(
+ alert_group=alert_group,
+ source=ResolutionNote.Source.WEB,
+ author=user,
+ )
+
+ url = reverse("api-internal:resolution_note-detail", kwargs={"pk": resolution_note.public_primary_key})
+
+ data = {
+ "text": "Test Message",
+ }
+
+ assert resolution_note.text != data["text"]
+
+ response = client.put(url, data=data, format="json", **make_user_auth_headers(user, token))
+
+ result = {
+ "id": resolution_note.public_primary_key,
+ "alert_group": alert_group.public_primary_key,
+ "source": {
+ "id": resolution_note.source,
+ "display_name": resolution_note.get_source_display(),
+ },
+ "author": {
+ "pk": user.public_primary_key,
+ "username": user.username,
+ },
+ "created_at": response.data["created_at"],
+ "text": data["text"],
+ }
+
+ assert response.status_code == status.HTTP_200_OK
+ resolution_note.refresh_from_db()
+ assert resolution_note.text == result["text"]
+ assert response.data == result
+
+
+@pytest.mark.django_db
+def test_update_resolution_note_invalid_source(
+ make_organization_and_user_with_plugin_token,
+ make_user_auth_headers,
+ make_alert_receive_channel,
+ make_alert_group,
+ make_resolution_note,
+):
+ organization, user, token = make_organization_and_user_with_plugin_token()
+ client = APIClient()
+
+ alert_receive_channel = make_alert_receive_channel(organization)
+ alert_group = make_alert_group(alert_receive_channel)
+
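+    # Slack-sourced notes are not editable via this endpoint, as the 400 asserted below verifies.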
+ resolution_note = make_resolution_note(
+ alert_group=alert_group,
+ source=ResolutionNote.Source.SLACK,
+ author=user,
+ )
+
+ url = reverse("api-internal:resolution_note-detail", kwargs={"pk": resolution_note.public_primary_key})
+
+ data = {
+ "text": "Test Message",
+ }
+
+ assert resolution_note.message_text != data["text"]
+
+ response = client.put(url, data=data, format="json", **make_user_auth_headers(user, token))
+
+ assert response.status_code == status.HTTP_400_BAD_REQUEST
+ resolution_note.refresh_from_db()
+ assert resolution_note.message_text != data["text"]
+ assert response.data["detail"] == "Cannot update message with this source type"
+
+
+@pytest.mark.django_db
+def test_delete_resolution_note(
+ make_organization_and_user_with_plugin_token,
+ make_user_auth_headers,
+ make_alert_receive_channel,
+ make_alert_group,
+ make_resolution_note,
+):
+ organization, user, token = make_organization_and_user_with_plugin_token()
+ client = APIClient()
+
+ alert_receive_channel = make_alert_receive_channel(organization)
+ alert_group = make_alert_group(alert_receive_channel)
+
+ resolution_note = make_resolution_note(
+ alert_group=alert_group,
+ source=ResolutionNote.Source.WEB,
+ author=user,
+ )
+
+ url = reverse("api-internal:resolution_note-detail", kwargs={"pk": resolution_note.public_primary_key})
+
+ assert resolution_note.deleted_at is None
+
+ response = client.delete(url, format="json", **make_user_auth_headers(user, token))
+
+ assert response.status_code == status.HTTP_204_NO_CONTENT
+
+ resolution_note.refresh_from_db()
+
+ assert resolution_note.deleted_at is not None
+
+ response = client.get(url, format="json", **make_user_auth_headers(user, token))
+
+ assert response.status_code == status.HTTP_404_NOT_FOUND
+ assert response.data["detail"] == "Not found."
+
+
+@patch(
+ "apps.api.views.resolution_note.ResolutionNoteView.create",
+ return_value=Response(
+ status=status.HTTP_200_OK,
+ data={}, # mock data with empty dict to satisfy overridden dispatch method in ResolutionNoteView
+ ),
+)
+@pytest.mark.django_db
+@pytest.mark.parametrize(
+ "role,expected_status",
+ [
+ (Role.ADMIN, status.HTTP_200_OK),
+ (Role.EDITOR, status.HTTP_200_OK),
+ (Role.VIEWER, status.HTTP_403_FORBIDDEN),
+ ],
+)
+def test_resolution_note_create_permissions(
+ mocked_create,
+ make_organization_and_user_with_plugin_token,
+ make_user_auth_headers,
+ role,
+ expected_status,
+):
+ organization, user, token = make_organization_and_user_with_plugin_token(role=role)
+ client = APIClient()
+
+ url = reverse("api-internal:resolution_note-list")
+
+ response = client.post(url, format="json", **make_user_auth_headers(user, token))
+
+ assert response.status_code == expected_status
+
+
+@patch(
+ "apps.api.views.resolution_note.ResolutionNoteView.update",
+ return_value=Response(
+ status=status.HTTP_200_OK,
+ data={}, # mock data with empty dict to satisfy overridden dispatch method in ResolutionNoteView
+ ),
+)
+@pytest.mark.django_db
+@pytest.mark.parametrize(
+ "role,expected_status",
+ [
+ (Role.ADMIN, status.HTTP_200_OK),
+ (Role.EDITOR, status.HTTP_200_OK),
+ (Role.VIEWER, status.HTTP_403_FORBIDDEN),
+ ],
+)
+def test_resolution_note_update_permissions(
+ mocked_update,
+ make_organization_and_user_with_plugin_token,
+ make_user_auth_headers,
+ make_alert_receive_channel,
+ make_alert_group,
+ make_resolution_note,
+ role,
+ expected_status,
+):
+ organization, user, token = make_organization_and_user_with_plugin_token(role=role)
+ alert_receive_channel = make_alert_receive_channel(organization)
+ alert_group = make_alert_group(alert_receive_channel)
+ resolution_note = make_resolution_note(
+ alert_group=alert_group,
+ source=ResolutionNote.Source.WEB,
+ author=user,
+ )
+ client = APIClient()
+
+ url = reverse("api-internal:resolution_note-detail", kwargs={"pk": resolution_note.public_primary_key})
+
+ response = client.put(url, format="json", **make_user_auth_headers(user, token))
+
+ assert response.status_code == expected_status
+
+ response = client.patch(url, format="json", **make_user_auth_headers(user, token))
+
+ assert response.status_code == expected_status
+
+
+@patch(
+ "apps.api.views.resolution_note.ResolutionNoteView.destroy",
+ return_value=Response(status=status.HTTP_204_NO_CONTENT, data={}),
+)
+@pytest.mark.django_db
+@pytest.mark.parametrize(
+ "role,expected_status",
+ [
+ (Role.ADMIN, status.HTTP_204_NO_CONTENT),
+ (Role.EDITOR, status.HTTP_204_NO_CONTENT),
+ (Role.VIEWER, status.HTTP_403_FORBIDDEN),
+ ],
+)
+def test_resolution_note_delete_permissions(
+ mocked_delete,
+ make_organization_and_user_with_plugin_token,
+ make_user_auth_headers,
+ make_alert_receive_channel,
+ make_alert_group,
+ make_resolution_note,
+ role,
+ expected_status,
+):
+ organization, user, token = make_organization_and_user_with_plugin_token(role=role)
+ alert_receive_channel = make_alert_receive_channel(organization)
+ alert_group = make_alert_group(alert_receive_channel)
+ resolution_note = make_resolution_note(
+ alert_group=alert_group,
+ source=ResolutionNote.Source.WEB,
+ author=user,
+ )
+ client = APIClient()
+
+ url = reverse("api-internal:resolution_note-detail", kwargs={"pk": resolution_note.public_primary_key})
+
+ response = client.delete(url, format="json", **make_user_auth_headers(user, token))
+
+ assert response.status_code == expected_status
+
+
+@patch(
+ "apps.api.views.resolution_note.ResolutionNoteView.list",
+ return_value=Response(
+ status=status.HTTP_200_OK,
+ ),
+)
+@pytest.mark.django_db
+@pytest.mark.parametrize(
+ "role,expected_status",
+ [
+ (Role.ADMIN, status.HTTP_200_OK),
+ (Role.EDITOR, status.HTTP_200_OK),
+ (Role.VIEWER, status.HTTP_200_OK),
+ ],
+)
+def test_resolution_note_list_permissions(
+ mocked_list,
+ make_organization_and_user_with_plugin_token,
+ make_user_auth_headers,
+ role,
+ expected_status,
+):
+ organization, user, token = make_organization_and_user_with_plugin_token(role=role)
+ client = APIClient()
+
+ url = reverse("api-internal:resolution_note-list")
+
+ response = client.get(url, format="json", **make_user_auth_headers(user, token))
+
+ assert response.status_code == expected_status
+
+
+@patch(
+ "apps.api.views.resolution_note.ResolutionNoteView.retrieve",
+ return_value=Response(
+ status=status.HTTP_200_OK,
+ ),
+)
+@pytest.mark.django_db
+@pytest.mark.parametrize(
+ "role,expected_status",
+ [
+ (Role.ADMIN, status.HTTP_200_OK),
+ (Role.EDITOR, status.HTTP_200_OK),
+ (Role.VIEWER, status.HTTP_200_OK),
+ ],
+)
+def test_resolution_note_detail_permissions(
+ mocked_detail,
+ make_organization_and_user_with_plugin_token,
+ make_user_auth_headers,
+ make_alert_receive_channel,
+ make_alert_group,
+ make_resolution_note,
+ role,
+ expected_status,
+):
+ organization, user, token = make_organization_and_user_with_plugin_token(role=role)
+ alert_receive_channel = make_alert_receive_channel(organization)
+ alert_group = make_alert_group(alert_receive_channel)
+ resolution_note = make_resolution_note(
+ alert_group=alert_group,
+ source=ResolutionNote.Source.WEB,
+ author=user,
+ )
+ client = APIClient()
+
+ url = reverse("api-internal:resolution_note-detail", kwargs={"pk": resolution_note.public_primary_key})
+
+ response = client.get(url, format="json", **make_user_auth_headers(user, token))
+
+ assert response.status_code == expected_status
diff --git a/engine/apps/api/tests/test_preview_template_options.py b/engine/apps/api/tests/test_preview_template_options.py
new file mode 100644
index 0000000000..4f891bbde4
--- /dev/null
+++ b/engine/apps/api/tests/test_preview_template_options.py
@@ -0,0 +1,18 @@
+import pytest
+from django.urls import reverse
+from rest_framework.test import APIClient
+
+
+@pytest.mark.django_db
+def test_preview_template_options_include_additional_backends(
+ make_organization_and_user_with_plugin_token, make_user_auth_headers
+):
+ _, user, token = make_organization_and_user_with_plugin_token()
+ client = APIClient()
+
+ url = reverse(
+ "api-internal:preview_template_options",
+ )
+ response = client.get(url, format="json", **make_user_auth_headers(user, token))
+
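+    # The test-only messaging backend should appear among the notification channel options.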
+ assert "testonly" in response.json()["notification_channel_options"]
diff --git a/engine/apps/api/tests/test_route_regex_debugger.py b/engine/apps/api/tests/test_route_regex_debugger.py
new file mode 100644
index 0000000000..dbd3e97a5c
--- /dev/null
+++ b/engine/apps/api/tests/test_route_regex_debugger.py
@@ -0,0 +1,28 @@
+import pytest
+from django.urls import reverse
+from rest_framework import status
+from rest_framework.test import APIClient
+
+
+@pytest.mark.django_db
+def test_regex_is_required_for_route_regex_debugger(
+ make_organization_and_user_with_plugin_token, make_user_auth_headers, make_escalation_chain
+):
+ organization, user, token = make_organization_and_user_with_plugin_token()
+ make_escalation_chain(organization)
+ client = APIClient()
+ url = reverse("api-internal:route_regex_debugger")
+ response = client.get(url, format="text/plain", **make_user_auth_headers(user, token))
+ assert response.status_code == status.HTTP_400_BAD_REQUEST
+
+
+@pytest.mark.django_db
+def test_invalid_regex_for_route_regex_debugger(
+ make_organization_and_user_with_plugin_token, make_user_auth_headers, make_escalation_chain
+):
+ organization, user, token = make_organization_and_user_with_plugin_token()
+ make_escalation_chain(organization)
+ client = APIClient()
+ url = reverse("api-internal:route_regex_debugger")
+ response = client.get(f"{url}?regex=invalid_regex\\", format="text/plain", **make_user_auth_headers(user, token))
+ assert response.status_code == status.HTTP_400_BAD_REQUEST
diff --git a/engine/apps/api/tests/test_schedule_export.py b/engine/apps/api/tests/test_schedule_export.py
new file mode 100644
index 0000000000..ecbdec7ea0
--- /dev/null
+++ b/engine/apps/api/tests/test_schedule_export.py
@@ -0,0 +1,155 @@
+import pytest
+from django.urls import reverse
+from rest_framework import status
+from rest_framework.test import APIClient
+
+from apps.auth_token.models import ScheduleExportAuthToken
+from apps.schedules.models import OnCallScheduleICal
+from common.constants.role import Role
+
+ICAL_URL = "https://calendar.google.com/calendar/ical/amixr.io_37gttuakhrtr75ano72p69rt78%40group.calendar.google.com/private-1d00a680ba5be7426c3eb3ef1616e26d/basic.ics" # noqa
+
+
+@pytest.mark.django_db
+@pytest.mark.parametrize(
+ "role,expected_status",
+ [
+ (Role.ADMIN, status.HTTP_200_OK),
+ (Role.EDITOR, status.HTTP_200_OK),
+ (Role.VIEWER, status.HTTP_403_FORBIDDEN),
+ ],
+)
+def test_get_schedule_export_token(
+ make_organization_and_user_with_plugin_token,
+ make_user_auth_headers,
+ make_schedule,
+ role,
+ expected_status,
+):
+
+ organization, user, token = make_organization_and_user_with_plugin_token(role=role)
+ schedule = make_schedule(
+ organization,
+ schedule_class=OnCallScheduleICal,
+ name="test_ical_schedule",
+ ical_url_primary=ICAL_URL,
+ )
+
+ ScheduleExportAuthToken.create_auth_token(user=user, organization=organization, schedule=schedule)
+
+ client = APIClient()
+
+ url = reverse("api-internal:schedule-export-token", kwargs={"pk": schedule.public_primary_key})
+
+ response = client.get(url, format="json", **make_user_auth_headers(user, token))
+
+ assert response.status_code == expected_status
+
+
+@pytest.mark.django_db
+@pytest.mark.parametrize(
+ "role,expected_status",
+ [
+ (Role.ADMIN, status.HTTP_404_NOT_FOUND),
+ (Role.EDITOR, status.HTTP_404_NOT_FOUND),
+ (Role.VIEWER, status.HTTP_403_FORBIDDEN),
+ ],
+)
+def test_schedule_export_token_not_found(
+ make_organization_and_user_with_plugin_token,
+ make_user_auth_headers,
+ make_schedule,
+ role,
+ expected_status,
+):
+
+ organization, user, token = make_organization_and_user_with_plugin_token(role=role)
+ schedule = make_schedule(
+ organization,
+ schedule_class=OnCallScheduleICal,
+ name="test_ical_schedule",
+ ical_url_primary=ICAL_URL,
+ )
+
+ url = reverse("api-internal:schedule-export-token", kwargs={"pk": schedule.public_primary_key})
+
+ client = APIClient()
+
+ response = client.get(url, format="json", **make_user_auth_headers(user, token))
+
+ assert response.status_code == expected_status
+
+
+@pytest.mark.django_db
+@pytest.mark.parametrize(
+ "role,expected_status",
+ [
+ (Role.ADMIN, status.HTTP_201_CREATED),
+ (Role.EDITOR, status.HTTP_201_CREATED),
+ (Role.VIEWER, status.HTTP_403_FORBIDDEN),
+ ],
+)
+def test_schedule_create_export_token(
+ make_organization_and_user_with_plugin_token,
+ make_user_auth_headers,
+ make_schedule,
+ role,
+ expected_status,
+):
+
+ organization, user, token = make_organization_and_user_with_plugin_token(role=role)
+ schedule = make_schedule(
+ organization,
+ schedule_class=OnCallScheduleICal,
+ name="test_ical_schedule",
+ ical_url_primary=ICAL_URL,
+ )
+
+ url = reverse("api-internal:schedule-export-token", kwargs={"pk": schedule.public_primary_key})
+
+ client = APIClient()
+
+ response = client.post(url, format="json", **make_user_auth_headers(user, token))
+
+    assert response.status_code == expected_status
+
+
+@pytest.mark.django_db
+@pytest.mark.parametrize(
+ "role,expected_status",
+ [
+ (Role.ADMIN, status.HTTP_204_NO_CONTENT),
+ (Role.EDITOR, status.HTTP_204_NO_CONTENT),
+ (Role.VIEWER, status.HTTP_403_FORBIDDEN),
+ ],
+)
+def test_schedule_delete_export_token(
+ make_organization_and_user_with_plugin_token,
+ make_user_auth_headers,
+ make_schedule,
+ role,
+ expected_status,
+):
+
+ organization, user, token = make_organization_and_user_with_plugin_token(role=role)
+ schedule = make_schedule(
+ organization,
+ schedule_class=OnCallScheduleICal,
+ name="test_ical_schedule",
+ ical_url_primary=ICAL_URL,
+ )
+
+ instance, _ = ScheduleExportAuthToken.create_auth_token(user=user, organization=organization, schedule=schedule)
+
+ url = reverse("api-internal:schedule-export-token", kwargs={"pk": schedule.public_primary_key})
+
+ client = APIClient()
+
+ response = client.delete(url, format="json", **make_user_auth_headers(user, token))
+
+    assert response.status_code == expected_status
+
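+    # After a successful delete, the export token must be gone from the database.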
+    if response.status_code != status.HTTP_403_FORBIDDEN:
+ check_token = ScheduleExportAuthToken.objects.filter(id=instance.id)
+
+ assert len(check_token) == 0
diff --git a/engine/apps/api/tests/test_schedules.py b/engine/apps/api/tests/test_schedules.py
new file mode 100644
index 0000000000..29a8c530a5
--- /dev/null
+++ b/engine/apps/api/tests/test_schedules.py
@@ -0,0 +1,667 @@
+import json
+from unittest.mock import patch
+
+import pytest
+from django.urls import reverse
+from django.utils import timezone
+from rest_framework import status
+from rest_framework.response import Response
+from rest_framework.serializers import ValidationError
+from rest_framework.test import APIClient
+
+from apps.schedules.models import CustomOnCallShift, OnCallSchedule, OnCallScheduleCalendar, OnCallScheduleICal
+from common.constants.role import Role
+
+ICAL_URL = "https://calendar.google.com/calendar/ical/amixr.io_37gttuakhrtr75ano72p69rt78%40group.calendar.google.com/private-1d00a680ba5be7426c3eb3ef1616e26d/basic.ics"
+
+
+@pytest.fixture()
+def schedule_internal_api_setup(
+ make_organization_and_user_with_plugin_token,
+ make_user_auth_headers,
+ make_slack_channel,
+ make_schedule,
+):
+ organization, user, token = make_organization_and_user_with_plugin_token()
+
+ slack_channel = make_slack_channel(
+ organization.slack_team_identity,
+ )
+
+ calendar_schedule = make_schedule(
+ organization,
+ schedule_class=OnCallScheduleCalendar,
+ name="test_calendar_schedule",
+ )
+
+ ical_schedule = make_schedule(
+ organization,
+ schedule_class=OnCallScheduleICal,
+ name="test_ical_schedule",
+ ical_url_primary=ICAL_URL,
+ )
+
+ return user, token, calendar_schedule, ical_schedule, slack_channel
+
+
+@pytest.mark.django_db
+def test_get_list_schedules(schedule_internal_api_setup, make_user_auth_headers):
+ user, token, calendar_schedule, ical_schedule, slack_channel = schedule_internal_api_setup
+ client = APIClient()
+ url = reverse("api-internal:schedule-list")
+
+ expected_payload = [
+ {
+ "id": calendar_schedule.public_primary_key,
+ "type": 0,
+ "team": None,
+ "name": "test_calendar_schedule",
+ "time_zone": "UTC",
+ "slack_channel": None,
+ "user_group": None,
+ "warnings": [],
+ "ical_url_overrides": None,
+ "on_call_now": [],
+ "has_gaps": False,
+ "mention_oncall_next": False,
+ "mention_oncall_start": True,
+ "notify_empty_oncall": 0,
+ "notify_oncall_shift_freq": 1,
+ },
+ {
+ "id": ical_schedule.public_primary_key,
+ "type": 1,
+ "team": None,
+ "name": "test_ical_schedule",
+ "ical_url_primary": ICAL_URL,
+ "ical_url_overrides": None,
+ "slack_channel": None,
+ "user_group": None,
+ "warnings": [],
+ "on_call_now": [],
+ "has_gaps": False,
+ "mention_oncall_next": False,
+ "mention_oncall_start": True,
+ "notify_empty_oncall": 0,
+ "notify_oncall_shift_freq": 1,
+ },
+ ]
+ response = client.get(url, format="json", **make_user_auth_headers(user, token))
+ assert response.status_code == status.HTTP_200_OK
+ assert response.json() == expected_payload
+
+
+@pytest.mark.django_db
+def test_get_detail_calendar_schedule(schedule_internal_api_setup, make_user_auth_headers):
+ user, token, calendar_schedule, _, _ = schedule_internal_api_setup
+ client = APIClient()
+ url = reverse("api-internal:schedule-detail", kwargs={"pk": calendar_schedule.public_primary_key})
+
+ expected_payload = {
+ "id": calendar_schedule.public_primary_key,
+ "type": 0,
+ "team": None,
+ "name": "test_calendar_schedule",
+ "time_zone": "UTC",
+ "slack_channel": None,
+ "user_group": None,
+ "warnings": [],
+ "ical_url_overrides": None,
+ "on_call_now": [],
+ "has_gaps": False,
+ "mention_oncall_next": False,
+ "mention_oncall_start": True,
+ "notify_empty_oncall": 0,
+ "notify_oncall_shift_freq": 1,
+ }
+
+ response = client.get(url, format="json", **make_user_auth_headers(user, token))
+ assert response.status_code == status.HTTP_200_OK
+ assert response.data == expected_payload
+
+
+@pytest.mark.django_db
+def test_get_detail_ical_schedule(schedule_internal_api_setup, make_user_auth_headers):
+ user, token, _, ical_schedule, _ = schedule_internal_api_setup
+ client = APIClient()
+ url = reverse("api-internal:schedule-detail", kwargs={"pk": ical_schedule.public_primary_key})
+
+ expected_payload = {
+ "id": ical_schedule.public_primary_key,
+ "team": None,
+ "ical_url_primary": ICAL_URL,
+ "ical_url_overrides": None,
+ "name": "test_ical_schedule",
+ "type": 1,
+ "slack_channel": None,
+ "user_group": None,
+ "warnings": [],
+ "on_call_now": [],
+ "has_gaps": False,
+ "mention_oncall_next": False,
+ "mention_oncall_start": True,
+ "notify_empty_oncall": 0,
+ "notify_oncall_shift_freq": 1,
+ }
+
+ response = client.get(url, format="json", **make_user_auth_headers(user, token))
+ assert response.status_code == status.HTTP_200_OK
+ assert response.data == expected_payload
+
+
+@pytest.mark.django_db
+def test_create_calendar_schedule(schedule_internal_api_setup, make_user_auth_headers):
+ user, token, _, _, _ = schedule_internal_api_setup
+ client = APIClient()
+ url = reverse("api-internal:schedule-list")
+ data = {
+ "ical_url_overrides": None,
+ "type": 0,
+ "name": "created_calendar_schedule",
+ "time_zone": "UTC",
+ "slack_channel_id": None,
+ "user_group": None,
+ "team": None,
+ "warnings": [],
+ "on_call_now": [],
+ "has_gaps": False,
+ "mention_oncall_next": False,
+ "mention_oncall_start": True,
+ "notify_empty_oncall": 0,
+ "notify_oncall_shift_freq": 1,
+ }
+ response = client.post(url, data, format="json", **make_user_auth_headers(user, token))
+    # the response should echo the submitted data plus the server-generated id
+ schedule = OnCallSchedule.objects.get(public_primary_key=response.data["id"])
+ data["id"] = schedule.public_primary_key
+ assert response.status_code == status.HTTP_201_CREATED
+ assert response.data == data
+
+
+@pytest.mark.django_db
+def test_create_ical_schedule(schedule_internal_api_setup, make_user_auth_headers):
+ user, token, _, _, _ = schedule_internal_api_setup
+ client = APIClient()
+ url = reverse("api-internal:schedule-list")
+ with patch(
+ "apps.api.serializers.schedule_ical.ScheduleICalSerializer.validate_ical_url_primary", return_value=ICAL_URL
+ ):
+ data = {
+ "ical_url_primary": ICAL_URL,
+ "ical_url_overrides": None,
+ "name": "created_ical_schedule",
+ "type": 1,
+ "slack_channel_id": None,
+ "user_group": None,
+ "team": None,
+ "warnings": [],
+ "on_call_now": [],
+ "has_gaps": False,
+ "mention_oncall_next": False,
+ "mention_oncall_start": True,
+ "notify_empty_oncall": 0,
+ "notify_oncall_shift_freq": 1,
+ }
+ response = client.post(url, data, format="json", **make_user_auth_headers(user, token))
+    # the response should echo the submitted data plus the server-generated id
+ schedule = OnCallSchedule.objects.get(public_primary_key=response.data["id"])
+ data["id"] = schedule.public_primary_key
+ assert response.status_code == status.HTTP_201_CREATED
+ assert response.data == data
+
+
+@pytest.mark.django_db
+def test_create_invalid_ical_schedule(schedule_internal_api_setup, make_user_auth_headers):
+ user, token, _, ical_schedule, _ = schedule_internal_api_setup
+ client = APIClient()
+ url = reverse("api-internal:custom_button-list")
+ with patch(
+ "apps.api.serializers.schedule_ical.ScheduleICalSerializer.validate_ical_url_primary",
+ side_effect=ValidationError("Ical download failed"),
+ ):
+ data = {
+ "ical_url_primary": ICAL_URL,
+ "ical_url_overrides": None,
+ "name": "created_ical_schedule",
+ "type": 1,
+ }
+ response = client.post(url, data, format="json", **make_user_auth_headers(user, token))
+ assert response.status_code == status.HTTP_400_BAD_REQUEST
+
+
+@pytest.mark.django_db
+def test_update_calendar_schedule(schedule_internal_api_setup, make_user_auth_headers):
+ user, token, calendar_schedule, _, _ = schedule_internal_api_setup
+ client = APIClient()
+
+ url = reverse("api-internal:schedule-detail", kwargs={"pk": calendar_schedule.public_primary_key})
+ data = {
+ "name": "updated_calendar_schedule",
+ "type": 0,
+ "team": None,
+ }
+ response = client.put(
+ url, data=json.dumps(data), content_type="application/json", **make_user_auth_headers(user, token)
+ )
+ updated_instance = OnCallSchedule.objects.get(public_primary_key=calendar_schedule.public_primary_key)
+ assert response.status_code == status.HTTP_200_OK
+ assert updated_instance.name == "updated_calendar_schedule"
+
+
+@pytest.mark.django_db
+def test_update_ical_schedule(schedule_internal_api_setup, make_user_auth_headers):
+ user, token, _, ical_schedule, _ = schedule_internal_api_setup
+ client = APIClient()
+
+ url = reverse("api-internal:schedule-detail", kwargs={"pk": ical_schedule.public_primary_key})
+ data = {
+ "name": "updated_ical_schedule",
+ "type": 1,
+ "team": None,
+ }
+ response = client.put(
+ url, data=json.dumps(data), content_type="application/json", **make_user_auth_headers(user, token)
+ )
+ updated_instance = OnCallSchedule.objects.get(public_primary_key=ical_schedule.public_primary_key)
+ assert response.status_code == status.HTTP_200_OK
+ assert updated_instance.name == "updated_ical_schedule"
+
+
+@pytest.mark.django_db
+def test_delete_schedule(schedule_internal_api_setup, make_user_auth_headers):
+ user, token, calendar_schedule, ical_schedule, _ = schedule_internal_api_setup
+ client = APIClient()
+
+ for calendar in (calendar_schedule, ical_schedule):
+ url = reverse("api-internal:schedule-detail", kwargs={"pk": calendar.public_primary_key})
+ response = client.delete(url, **make_user_auth_headers(user, token))
+ assert response.status_code == status.HTTP_204_NO_CONTENT
+
+
+@pytest.mark.django_db
+def test_events_calendar(
+ make_organization_and_user_with_plugin_token,
+ make_user_auth_headers,
+ make_schedule,
+ make_on_call_shift,
+):
+ organization, user, token = make_organization_and_user_with_plugin_token()
+ client = APIClient()
+
+ schedule = make_schedule(
+ organization,
+ schedule_class=OnCallScheduleCalendar,
+ name="test_calendar_schedule",
+ )
+
+ data = {
+ "start": timezone.now().replace(microsecond=0),
+ "duration": timezone.timedelta(seconds=7200),
+ "priority_level": 2,
+ }
+
+ on_call_shift = make_on_call_shift(
+ organization=organization, shift_type=CustomOnCallShift.TYPE_SINGLE_EVENT, **data
+ )
+ on_call_shift.users.add(user)
+ schedule.custom_on_call_shifts.add(on_call_shift)
+
+ url = reverse("api-internal:schedule-events", kwargs={"pk": schedule.public_primary_key})
+
+ response = client.get(url, format="json", **make_user_auth_headers(user, token))
+
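+    # The single two-hour shift should surface as one concrete (non-gap) event on the primary calendar.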
+ expected_result = {
+ "id": schedule.public_primary_key,
+ "name": "test_calendar_schedule",
+ "type": 0,
+ "slack_channel": None,
+ "events": [
+ {
+ "all_day": False,
+ "start": on_call_shift.start,
+ "end": on_call_shift.start + on_call_shift.duration,
+ "users": [{"display_name": user.username, "pk": user.public_primary_key}],
+ "priority_level": on_call_shift.priority_level,
+ "source": "api",
+ "calendar_type": OnCallSchedule.PRIMARY,
+ "is_empty": False,
+ "is_gap": False,
+ }
+ ],
+ }
+ assert response.status_code == status.HTTP_200_OK
+ assert response.data == expected_result
+
+
+@pytest.mark.django_db
+@pytest.mark.parametrize(
+ "role,expected_status",
+ [
+ (Role.ADMIN, status.HTTP_200_OK),
+ (Role.EDITOR, status.HTTP_403_FORBIDDEN),
+ (Role.VIEWER, status.HTTP_403_FORBIDDEN),
+ ],
+)
+def test_schedule_create_permissions(
+ make_organization_and_user_with_plugin_token,
+ make_user_auth_headers,
+ make_schedule,
+ role,
+ expected_status,
+):
+ organization, user, token = make_organization_and_user_with_plugin_token(role=role)
+ make_schedule(
+ organization,
+ schedule_class=OnCallScheduleICal,
+ name="test_ical_schedule",
+ ical_url_primary=ICAL_URL,
+ )
+
+ client = APIClient()
+ url = reverse("api-internal:schedule-list")
+
+ with patch(
+ "apps.api.views.schedule.ScheduleView.create",
+ return_value=Response(
+ status=status.HTTP_200_OK,
+ ),
+ ):
+ response = client.post(url, format="json", **make_user_auth_headers(user, token))
+
+ assert response.status_code == expected_status
+
+
+@pytest.mark.django_db
+@pytest.mark.parametrize(
+ "role,expected_status",
+ [
+ (Role.ADMIN, status.HTTP_200_OK),
+ (Role.EDITOR, status.HTTP_403_FORBIDDEN),
+ (Role.VIEWER, status.HTTP_403_FORBIDDEN),
+ ],
+)
+def test_schedule_update_permissions(
+ make_organization_and_user_with_plugin_token,
+ make_user_auth_headers,
+ make_schedule,
+ role,
+ expected_status,
+):
+ organization, user, token = make_organization_and_user_with_plugin_token(role=role)
+ schedule = make_schedule(
+ organization,
+ schedule_class=OnCallScheduleICal,
+ name="test_ical_schedule",
+ ical_url_primary=ICAL_URL,
+ )
+
+ client = APIClient()
+ url = reverse("api-internal:schedule-detail", kwargs={"pk": schedule.public_primary_key})
+
+ with patch(
+ "apps.api.views.schedule.ScheduleView.update",
+ return_value=Response(
+ status=status.HTTP_200_OK,
+ ),
+ ):
+ response = client.put(url, format="json", **make_user_auth_headers(user, token))
+
+ assert response.status_code == expected_status
+
+ response = client.patch(url, format="json", **make_user_auth_headers(user, token))
+
+ assert response.status_code == expected_status
+
+
+@pytest.mark.django_db
+@pytest.mark.parametrize(
+ "role,expected_status",
+ [(Role.ADMIN, status.HTTP_200_OK), (Role.EDITOR, status.HTTP_200_OK), (Role.VIEWER, status.HTTP_200_OK)],
+)
+def test_schedule_list_permissions(
+ make_organization_and_user_with_plugin_token,
+ make_user_auth_headers,
+ make_schedule,
+ role,
+ expected_status,
+):
+ organization, user, token = make_organization_and_user_with_plugin_token(role=role)
+ make_schedule(
+ organization,
+ schedule_class=OnCallScheduleICal,
+ name="test_ical_schedule",
+ ical_url_primary=ICAL_URL,
+ )
+
+ client = APIClient()
+ url = reverse("api-internal:schedule-list")
+
+ with patch(
+ "apps.api.views.schedule.ScheduleView.list",
+ return_value=Response(
+ status=status.HTTP_200_OK,
+ ),
+ ):
+ response = client.get(url, format="json", **make_user_auth_headers(user, token))
+
+ assert response.status_code == expected_status
+
+
+@pytest.mark.django_db
+@pytest.mark.parametrize(
+ "role,expected_status",
+ [(Role.ADMIN, status.HTTP_200_OK), (Role.EDITOR, status.HTTP_200_OK), (Role.VIEWER, status.HTTP_200_OK)],
+)
+def test_schedule_retrieve_permissions(
+ make_organization_and_user_with_plugin_token,
+ make_user_auth_headers,
+ make_schedule,
+ role,
+ expected_status,
+):
+ organization, user, token = make_organization_and_user_with_plugin_token(role=role)
+ schedule = make_schedule(
+ organization,
+ schedule_class=OnCallScheduleICal,
+ name="test_ical_schedule",
+ ical_url_primary=ICAL_URL,
+ )
+
+ client = APIClient()
+ url = reverse("api-internal:schedule-detail", kwargs={"pk": schedule.public_primary_key})
+
+ with patch(
+ "apps.api.views.schedule.ScheduleView.retrieve",
+ return_value=Response(
+ status=status.HTTP_200_OK,
+ ),
+ ):
+ response = client.get(url, format="json", **make_user_auth_headers(user, token))
+
+ assert response.status_code == expected_status
+
+
+@pytest.mark.django_db
+@pytest.mark.parametrize(
+ "role,expected_status",
+ [
+ (Role.ADMIN, status.HTTP_204_NO_CONTENT),
+ (Role.EDITOR, status.HTTP_403_FORBIDDEN),
+ (Role.VIEWER, status.HTTP_403_FORBIDDEN),
+ ],
+)
+def test_schedule_delete_permissions(
+ make_organization_and_user_with_plugin_token,
+ make_user_auth_headers,
+ make_schedule,
+ role,
+ expected_status,
+):
+ organization, user, token = make_organization_and_user_with_plugin_token(role=role)
+ schedule = make_schedule(
+ organization,
+ schedule_class=OnCallScheduleICal,
+ name="test_ical_schedule",
+ ical_url_primary=ICAL_URL,
+ )
+
+ client = APIClient()
+ url = reverse("api-internal:schedule-detail", kwargs={"pk": schedule.public_primary_key})
+
+ with patch(
+ "apps.api.views.schedule.ScheduleView.destroy",
+ return_value=Response(
+ status=status.HTTP_204_NO_CONTENT,
+ ),
+ ):
+ response = client.delete(url, format="json", **make_user_auth_headers(user, token))
+
+ assert response.status_code == expected_status
+
+
+@pytest.mark.django_db
+@pytest.mark.parametrize(
+ "role,expected_status",
+ [
+ (Role.ADMIN, status.HTTP_200_OK),
+ (Role.EDITOR, status.HTTP_200_OK),
+ (Role.VIEWER, status.HTTP_200_OK),
+ ],
+)
+def test_events_permissions(
+ make_organization_and_user_with_plugin_token,
+ make_user_auth_headers,
+ make_schedule,
+ role,
+ expected_status,
+):
+ organization, user, token = make_organization_and_user_with_plugin_token(role=role)
+ schedule = make_schedule(
+ organization,
+ schedule_class=OnCallScheduleICal,
+ name="test_ical_schedule",
+ ical_url_primary=ICAL_URL,
+ )
+
+ client = APIClient()
+ url = reverse("api-internal:schedule-events", kwargs={"pk": schedule.public_primary_key})
+
+ with patch(
+ "apps.api.views.schedule.ScheduleView.events",
+ return_value=Response(
+ status=status.HTTP_200_OK,
+ ),
+ ):
+ response = client.get(url, format="json", **make_user_auth_headers(user, token))
+
+ assert response.status_code == expected_status
+
+
+@pytest.mark.django_db
+@pytest.mark.parametrize(
+ "role,expected_status",
+ [
+ (Role.ADMIN, status.HTTP_200_OK),
+ (Role.EDITOR, status.HTTP_403_FORBIDDEN),
+ (Role.VIEWER, status.HTTP_403_FORBIDDEN),
+ ],
+)
+def test_reload_ical_permissions(
+ make_organization_and_user_with_plugin_token,
+ make_user_auth_headers,
+ make_schedule,
+ role,
+ expected_status,
+):
+ organization, user, token = make_organization_and_user_with_plugin_token(role=role)
+ schedule = make_schedule(
+ organization,
+ schedule_class=OnCallScheduleICal,
+ name="test_ical_schedule",
+ ical_url_primary=ICAL_URL,
+ )
+
+ client = APIClient()
+ url = reverse("api-internal:schedule-reload-ical", kwargs={"pk": schedule.public_primary_key})
+
+ with patch(
+ "apps.api.views.schedule.ScheduleView.reload_ical",
+ return_value=Response(
+ status=status.HTTP_200_OK,
+ ),
+ ):
+ response = client.post(url, format="json", **make_user_auth_headers(user, token))
+
+ assert response.status_code == expected_status
+
+
+@pytest.mark.django_db
+@pytest.mark.parametrize(
+ "role,expected_status",
+ [
+ (Role.ADMIN, status.HTTP_200_OK),
+ (Role.EDITOR, status.HTTP_200_OK),
+ (Role.VIEWER, status.HTTP_200_OK),
+ ],
+)
+def test_schedule_notify_oncall_shift_freq_options_permissions(
+ make_organization_and_user_with_plugin_token,
+ make_user_auth_headers,
+ make_schedule,
+ role,
+ expected_status,
+):
+ organization, user, token = make_organization_and_user_with_plugin_token(role=role)
+ url = reverse("api-internal:schedule-notify-oncall-shift-freq-options")
+ client = APIClient()
+ response = client.get(url, format="json", **make_user_auth_headers(user, token))
+
+ assert response.status_code == expected_status
+
+
+@pytest.mark.django_db
+@pytest.mark.parametrize(
+ "role,expected_status",
+ [
+ (Role.ADMIN, status.HTTP_200_OK),
+ (Role.EDITOR, status.HTTP_200_OK),
+ (Role.VIEWER, status.HTTP_200_OK),
+ ],
+)
+def test_schedule_notify_empty_oncall_options_permissions(
+ make_organization_and_user_with_plugin_token,
+ make_user_auth_headers,
+ make_schedule,
+ role,
+ expected_status,
+):
+ organization, user, token = make_organization_and_user_with_plugin_token(role=role)
+ url = reverse("api-internal:schedule-notify-empty-oncall-options")
+ client = APIClient()
+ response = client.get(url, format="json", **make_user_auth_headers(user, token))
+
+ assert response.status_code == expected_status
+
+
+@pytest.mark.django_db
+@pytest.mark.parametrize(
+ "role,expected_status",
+ [
+ (Role.ADMIN, status.HTTP_200_OK),
+ (Role.EDITOR, status.HTTP_200_OK),
+ (Role.VIEWER, status.HTTP_200_OK),
+ ],
+)
+def test_schedule_mention_options_permissions(
+ make_organization_and_user_with_plugin_token,
+ make_user_auth_headers,
+ make_schedule,
+ role,
+ expected_status,
+):
+ organization, user, token = make_organization_and_user_with_plugin_token(role=role)
+ url = reverse("api-internal:schedule-mention-options")
+ client = APIClient()
+ response = client.get(url, format="json", **make_user_auth_headers(user, token))
+
+ assert response.status_code == expected_status
diff --git a/engine/apps/api/tests/test_set_general_log_channel.py b/engine/apps/api/tests/test_set_general_log_channel.py
new file mode 100644
index 0000000000..703dd324e2
--- /dev/null
+++ b/engine/apps/api/tests/test_set_general_log_channel.py
@@ -0,0 +1,32 @@
+from unittest.mock import patch
+
+import pytest
+from django.urls import reverse
+from rest_framework import status
+from rest_framework.response import Response
+from rest_framework.test import APIClient
+
+from common.constants.role import Role
+
+
+# These tests cover permissions, not the view itself, so mocking the view out is fine here.
+@pytest.mark.django_db
+@pytest.mark.parametrize(
+ "role,expected_status",
+ [
+ (Role.ADMIN, status.HTTP_200_OK),
+ (Role.EDITOR, status.HTTP_403_FORBIDDEN),
+ (Role.VIEWER, status.HTTP_403_FORBIDDEN),
+ ],
+)
+def test_set_general_log_channel_permissions(
+ make_organization_and_user_with_plugin_token, make_user_auth_headers, role, expected_status
+):
+ _, user, token = make_organization_and_user_with_plugin_token(role)
+ client = APIClient()
+
+ url = reverse("api-internal:api-set-general-log-channel")
+ with patch("apps.api.views.organization.SetGeneralChannel.post", return_value=Response(status=status.HTTP_200_OK)):
+ response = client.post(url, format="json", **make_user_auth_headers(user, token))
+
+ assert response.status_code == expected_status
diff --git a/engine/apps/api/tests/test_slack_channels.py b/engine/apps/api/tests/test_slack_channels.py
new file mode 100644
index 0000000000..37d2c05c7b
--- /dev/null
+++ b/engine/apps/api/tests/test_slack_channels.py
@@ -0,0 +1,65 @@
+from unittest.mock import patch
+
+import pytest
+from django.urls import reverse
+from rest_framework import status
+from rest_framework.response import Response
+from rest_framework.test import APIClient
+
+from common.constants.role import Role
+
+
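+# Permission-layer tests: the view methods are mocked, so status codes come from DRF permission checks.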
+@pytest.mark.django_db
+@pytest.mark.parametrize(
+ "role,expected_status",
+ [
+ (Role.ADMIN, status.HTTP_200_OK),
+ (Role.EDITOR, status.HTTP_200_OK),
+ (Role.VIEWER, status.HTTP_200_OK),
+ ],
+)
+def test_slack_channels_list_permissions(
+ make_organization_and_user_with_plugin_token, make_user_auth_headers, role, expected_status
+):
+ _, user, token = make_organization_and_user_with_plugin_token(role)
+ client = APIClient()
+
+ url = reverse("api-internal:slack_channel-list")
+
+ response = client.get(url, format="json", **make_user_auth_headers(user, token))
+ with patch(
+ "apps.api.views.slack_channel.SlackChannelView.list",
+ return_value=Response(
+ status=status.HTTP_200_OK,
+ ),
+ ):
+ assert response.status_code == expected_status
+
+
+@pytest.mark.django_db
+@pytest.mark.parametrize(
+ "role,expected_status",
+ [
+ (Role.ADMIN, status.HTTP_200_OK),
+ (Role.EDITOR, status.HTTP_200_OK),
+ (Role.VIEWER, status.HTTP_200_OK),
+ ],
+)
+def test_slack_channels_detail_permissions(
+ make_organization_and_user_with_plugin_token, make_user_auth_headers, role, make_slack_channel, expected_status
+):
+ organization, user, token = make_organization_and_user_with_plugin_token(role)
+ slack_channel = make_slack_channel(organization.slack_team_identity)
+ client = APIClient()
+
+ url = reverse("api-internal:slack_channel-detail", kwargs={"pk": slack_channel.public_primary_key})
+
+ response = client.get(url, format="json", **make_user_auth_headers(user, token))
+ with patch(
+ "apps.api.views.slack_channel.SlackChannelView.retrieve",
+ return_value=Response(
+ status=status.HTTP_200_OK,
+ ),
+ ):
+ assert response.status_code == expected_status
diff --git a/engine/apps/api/tests/test_slack_team_settings.py b/engine/apps/api/tests/test_slack_team_settings.py
new file mode 100644
index 0000000000..31df5d83ad
--- /dev/null
+++ b/engine/apps/api/tests/test_slack_team_settings.py
@@ -0,0 +1,182 @@
+from unittest.mock import patch
+
+import pytest
+from django.urls import reverse
+from rest_framework import status
+from rest_framework.response import Response
+from rest_framework.test import APIClient
+
+from common.constants.role import Role
+
+
+@pytest.mark.django_db
+@pytest.mark.parametrize(
+ "role,expected_status",
+ [
+ (Role.ADMIN, status.HTTP_200_OK),
+ (Role.EDITOR, status.HTTP_200_OK),
+ (Role.VIEWER, status.HTTP_200_OK),
+ ],
+)
+def test_get_slack_settings_permissions(
+ make_organization_and_user_with_plugin_token,
+ make_user_auth_headers,
+ role,
+ expected_status,
+):
+ organization, user, token = make_organization_and_user_with_plugin_token(role=role)
+ client = APIClient()
+
+ url = reverse("api-internal:slack-settings")
+ with patch(
+ "apps.api.views.slack_team_settings.SlackTeamSettingsAPIView.get",
+ return_value=Response(
+ status=status.HTTP_200_OK,
+ ),
+ ):
+ response = client.get(url, format="json", **make_user_auth_headers(user, token))
+
+ assert response.status_code == expected_status
+
+
+@pytest.mark.django_db
+@pytest.mark.parametrize(
+ "role,expected_status",
+ [
+ (Role.ADMIN, status.HTTP_200_OK),
+ (Role.EDITOR, status.HTTP_403_FORBIDDEN),
+ (Role.VIEWER, status.HTTP_403_FORBIDDEN),
+ ],
+)
+def test_update_slack_settings_permissions(
+ make_organization_and_user_with_plugin_token,
+ make_user_auth_headers,
+ role,
+ expected_status,
+):
+ organization, user, token = make_organization_and_user_with_plugin_token(role=role)
+ client = APIClient()
+
+ url = reverse("api-internal:slack-settings")
+ with patch(
+ "apps.api.views.slack_team_settings.SlackTeamSettingsAPIView.put",
+ return_value=Response(
+ status=status.HTTP_200_OK,
+ ),
+ ):
+ response = client.put(url, format="json", **make_user_auth_headers(user, token))
+
+ assert response.status_code == expected_status
+
+
+@pytest.mark.django_db
+@pytest.mark.parametrize(
+ "role,expected_status",
+ [
+ (Role.ADMIN, status.HTTP_200_OK),
+ (Role.EDITOR, status.HTTP_200_OK),
+ (Role.VIEWER, status.HTTP_200_OK),
+ ],
+)
+def test_get_acknowledge_remind_options_permissions(
+ make_organization_and_user_with_plugin_token,
+ make_user_auth_headers,
+ role,
+ expected_status,
+):
+ organization, user, token = make_organization_and_user_with_plugin_token(role=role)
+ client = APIClient()
+
+ url = reverse("api-internal:acknowledge-reminder-options")
+ with patch(
+ "apps.api.views.slack_team_settings.AcknowledgeReminderOptionsAPIView.get",
+ return_value=Response(
+ status=status.HTTP_200_OK,
+ ),
+ ):
+ response = client.get(url, format="json", **make_user_auth_headers(user, token))
+
+ assert response.status_code == expected_status
+
+
+@pytest.mark.django_db
+@pytest.mark.parametrize(
+ "role,expected_status",
+ [
+ (Role.ADMIN, status.HTTP_200_OK),
+ (Role.EDITOR, status.HTTP_200_OK),
+ (Role.VIEWER, status.HTTP_200_OK),
+ ],
+)
+def test_get_unacknowledge_timeout_options_permissions(
+ make_organization_and_user_with_plugin_token,
+ make_user_auth_headers,
+ role,
+ expected_status,
+):
+ organization, user, token = make_organization_and_user_with_plugin_token(role=role)
+ client = APIClient()
+
+ url = reverse("api-internal:unacknowledge-timeout-options")
+ with patch(
+ "apps.api.views.slack_team_settings.UnAcknowledgeTimeoutOptionsAPIView.get",
+ return_value=Response(
+ status=status.HTTP_200_OK,
+ ),
+ ):
+ response = client.get(url, format="json", **make_user_auth_headers(user, token))
+
+ assert response.status_code == expected_status
+
+
+@pytest.mark.django_db
+def test_get_slack_settings(
+ make_organization_and_user_with_plugin_token,
+ make_user_auth_headers,
+):
+ organization, user, token = make_organization_and_user_with_plugin_token()
+ client = APIClient()
+
+ expected_payload = {
+ "pk": organization.public_primary_key,
+ "acknowledge_remind_timeout": 0,
+ "unacknowledge_timeout": 0,
+ }
+
+ url = reverse("api-internal:slack-settings")
+ response = client.get(url, format="json", **make_user_auth_headers(user, token))
+
+ assert response.status_code == status.HTTP_200_OK
+ assert response.data == expected_payload
+
+
+@pytest.mark.django_db
+def test_put_slack_settings(
+ make_organization_and_user_with_plugin_token,
+ make_user_auth_headers,
+):
+ organization, user, token = make_organization_and_user_with_plugin_token()
+ client = APIClient()
+
+ expected_payload = {
+ "pk": organization.public_primary_key,
+ "acknowledge_remind_timeout": 0,
+ "unacknowledge_timeout": 0,
+ }
+
+ data_to_update = {
+ "acknowledge_remind_timeout": 1,
+ "unacknowledge_timeout": 1,
+ }
+
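+    # Read back the defaults first, then update and expect the new values echoed back.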
+ url = reverse("api-internal:slack-settings")
+ response = client.get(url, format="json", **make_user_auth_headers(user, token))
+
+ assert response.data == expected_payload
+
+ response = client.put(url, data=data_to_update, format="json", **make_user_auth_headers(user, token))
+ expected_payload.update(data_to_update)
+
+ assert response.status_code == status.HTTP_200_OK
+ assert response.data == expected_payload
diff --git a/engine/apps/api/tests/test_subscription.py b/engine/apps/api/tests/test_subscription.py
new file mode 100644
index 0000000000..ef61c94995
--- /dev/null
+++ b/engine/apps/api/tests/test_subscription.py
@@ -0,0 +1,39 @@
+from unittest.mock import patch
+
+import pytest
+from django.urls import reverse
+from rest_framework import status
+from rest_framework.response import Response
+from rest_framework.test import APIClient
+
+from common.constants.role import Role
+
+
+@pytest.mark.django_db
+@pytest.mark.parametrize(
+ "role,expected_status",
+ [
+ (Role.ADMIN, status.HTTP_200_OK),
+ (Role.EDITOR, status.HTTP_200_OK),
+ (Role.VIEWER, status.HTTP_200_OK),
+ ],
+)
+def test_subscription_retrieve_permissions(
+ make_organization_and_user_with_plugin_token,
+ make_user_auth_headers,
+ role,
+ expected_status,
+):
+ _, user, token = make_organization_and_user_with_plugin_token(role)
+ client = APIClient()
+
+ url = reverse("api-internal:subscription")
+ with patch(
+ "apps.api.views.subscription.SubscriptionView.get",
+ return_value=Response(
+ status=status.HTTP_200_OK,
+ ),
+ ):
+ response = client.get(url, format="json", **make_user_auth_headers(user, token))
+
+ assert response.status_code == expected_status
diff --git a/engine/apps/api/tests/test_team.py b/engine/apps/api/tests/test_team.py
new file mode 100644
index 0000000000..50c58dd1a3
--- /dev/null
+++ b/engine/apps/api/tests/test_team.py
@@ -0,0 +1,89 @@
+import pytest
+from django.urls import reverse
+from rest_framework import status
+from rest_framework.test import APIClient
+
+from apps.user_management.models import Team
+from common.constants.role import Role
+
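+# The internal API appends a virtual "General" team (pk=None) to team list
+# responses, so expected payloads below include GENERAL_TEAM.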
+GENERAL_TEAM = Team(public_primary_key=None, name="General", email=None, avatar_url=None)
+
+
+def get_payload_from_team(team):
+ return {"id": team.public_primary_key, "name": team.name, "email": team.email, "avatar_url": team.avatar_url}
+
+
+@pytest.mark.django_db
+def test_list_teams(
+ make_organization,
+ make_team,
+ make_user_for_organization,
+ make_token_for_organization,
+ make_user_auth_headers,
+):
+ organization = make_organization()
+ user = make_user_for_organization(organization)
+ _, token = make_token_for_organization(organization)
+
+ team = make_team(organization)
+ team.users.add(user)
+
+ client = APIClient()
+ url = reverse("api-internal:team-list")
+ response = client.get(url, format="json", **make_user_auth_headers(user, token))
+
+ expected_payload = [get_payload_from_team(team), get_payload_from_team(GENERAL_TEAM)]
+
+ assert response.status_code == status.HTTP_200_OK
+ assert response.json() == expected_payload
+
+
+@pytest.mark.django_db
+def test_list_teams_for_non_member(
+ make_organization,
+ make_team,
+ make_user_for_organization,
+ make_token_for_organization,
+ make_user_auth_headers,
+):
+ organization = make_organization()
+ make_team(organization)
+ user = make_user_for_organization(organization)
+ _, token = make_token_for_organization(organization)
+
+ client = APIClient()
+ url = reverse("api-internal:team-list")
+ response = client.get(url, format="json", **make_user_auth_headers(user, token))
+
+ assert response.status_code == status.HTTP_200_OK
+ assert response.json() == [get_payload_from_team(GENERAL_TEAM)]
+
+
+@pytest.mark.django_db
+@pytest.mark.parametrize(
+ "role,expected_status",
+ [
+ (Role.ADMIN, status.HTTP_200_OK),
+ (Role.EDITOR, status.HTTP_200_OK),
+ (Role.VIEWER, status.HTTP_200_OK),
+ ],
+)
+def test_list_teams_permissions(
+ make_organization,
+ make_token_for_organization,
+ make_user_for_organization,
+ make_user_auth_headers,
+ role,
+ expected_status,
+):
+ organization = make_organization()
+ _, token = make_token_for_organization(organization)
+ user = make_user_for_organization(organization, role=role)
+
+ client = APIClient()
+ url = reverse("api-internal:team-list")
+ response = client.get(url, format="json", **make_user_auth_headers(user, token))
+
+    assert response.status_code == expected_status
diff --git a/engine/apps/api/tests/test_telegram_channel.py b/engine/apps/api/tests/test_telegram_channel.py
new file mode 100644
index 0000000000..6bf26b9cc3
--- /dev/null
+++ b/engine/apps/api/tests/test_telegram_channel.py
@@ -0,0 +1,267 @@
+import pytest
+from django.urls import reverse
+from rest_framework import status
+from rest_framework.test import APIClient
+
+from common.constants.role import Role
+
+
+@pytest.mark.django_db
+def test_not_authorized(make_organization_and_user_with_plugin_token, make_telegram_channel):
+ client = APIClient()
+
+ organization, user, _ = make_organization_and_user_with_plugin_token()
+ telegram_channel = make_telegram_channel(organization=organization)
+
+ url = reverse("api-internal:telegram_channel-list")
+ response = client.get(url)
+ assert response.status_code == status.HTTP_401_UNAUTHORIZED
+
+ url = reverse("api-internal:telegram_channel-detail", kwargs={"pk": telegram_channel.public_primary_key})
+ response = client.get(url)
+ assert response.status_code == status.HTTP_401_UNAUTHORIZED
+
+ url = reverse("api-internal:telegram_channel-detail", kwargs={"pk": telegram_channel.public_primary_key})
+ response = client.delete(url)
+ assert response.status_code == status.HTTP_401_UNAUTHORIZED
+
+ url = reverse("api-internal:telegram_channel-set-default", kwargs={"pk": telegram_channel.public_primary_key})
+ response = client.post(url)
+ assert response.status_code == status.HTTP_401_UNAUTHORIZED
+
+
+@pytest.mark.django_db
+@pytest.mark.parametrize(
+ "role,expected_status",
+ [
+ (Role.ADMIN, status.HTTP_200_OK),
+ (Role.EDITOR, status.HTTP_200_OK),
+ (Role.VIEWER, status.HTTP_200_OK),
+ ],
+)
+def test_list_telegram_channels_permissions(
+ make_organization_and_user_with_plugin_token,
+ make_user_auth_headers,
+ role,
+ expected_status,
+):
+ client = APIClient()
+
+ organization, user, token = make_organization_and_user_with_plugin_token(role=role)
+
+ url = reverse("api-internal:telegram_channel-list")
+ response = client.get(url, **make_user_auth_headers(user, token))
+
+ assert response.status_code == expected_status
+
+
+@pytest.mark.django_db
+@pytest.mark.parametrize(
+ "role,expected_status",
+ [
+ (Role.ADMIN, status.HTTP_200_OK),
+ (Role.EDITOR, status.HTTP_200_OK),
+ (Role.VIEWER, status.HTTP_200_OK),
+ ],
+)
+def test_get_telegram_channels_permissions(
+ make_organization_and_user_with_plugin_token,
+ make_user_auth_headers,
+ make_telegram_channel,
+ role,
+ expected_status,
+):
+ client = APIClient()
+
+ organization, user, token = make_organization_and_user_with_plugin_token(role=role)
+ telegram_channel = make_telegram_channel(organization=organization)
+
+ url = reverse("api-internal:telegram_channel-detail", kwargs={"pk": telegram_channel.public_primary_key})
+ response = client.get(url, **make_user_auth_headers(user, token))
+
+ assert response.status_code == expected_status
+
+
+@pytest.mark.django_db
+@pytest.mark.parametrize(
+ "role,expected_status",
+ [
+ (Role.ADMIN, status.HTTP_204_NO_CONTENT),
+ (Role.EDITOR, status.HTTP_403_FORBIDDEN),
+ (Role.VIEWER, status.HTTP_403_FORBIDDEN),
+ ],
+)
+def test_delete_telegram_channels_permissions(
+ make_organization_and_user_with_plugin_token,
+ make_user_auth_headers,
+ make_telegram_channel,
+ role,
+ expected_status,
+):
+ client = APIClient()
+
+ organization, user, token = make_organization_and_user_with_plugin_token(role=role)
+ telegram_channel = make_telegram_channel(organization=organization)
+
+ url = reverse("api-internal:telegram_channel-detail", kwargs={"pk": telegram_channel.public_primary_key})
+ response = client.delete(url, **make_user_auth_headers(user, token))
+
+ assert response.status_code == expected_status
+
+
+@pytest.mark.django_db
+@pytest.mark.parametrize(
+ "role,expected_status",
+ [
+ (Role.ADMIN, status.HTTP_200_OK),
+ (Role.EDITOR, status.HTTP_403_FORBIDDEN),
+ (Role.VIEWER, status.HTTP_403_FORBIDDEN),
+ ],
+)
+def test_set_default_telegram_channels_permissions(
+ make_organization_and_user_with_plugin_token,
+ make_user_auth_headers,
+ make_telegram_channel,
+ role,
+ expected_status,
+):
+ client = APIClient()
+
+ organization, user, token = make_organization_and_user_with_plugin_token(role=role)
+
+ telegram_channel = make_telegram_channel(organization=organization)
+
+ url = reverse("api-internal:telegram_channel-set-default", kwargs={"pk": telegram_channel.public_primary_key})
+ response = client.post(url, **make_user_auth_headers(user, token))
+
+ assert response.status_code == expected_status
+
+
+@pytest.mark.django_db
+def test_list_telegram_channels(
+ make_telegram_channel, make_organization_and_user_with_plugin_token, make_user_auth_headers
+):
+ client = APIClient()
+
+ organization, user, token = make_organization_and_user_with_plugin_token()
+
+ first_telegram_channel = make_telegram_channel(organization=organization)
+ second_telegram_channel = make_telegram_channel(organization=organization, is_default_channel=True)
+
+ expected_payload = [
+ {
+ "id": first_telegram_channel.public_primary_key,
+ "channel_chat_id": first_telegram_channel.channel_chat_id,
+ "discussion_group_chat_id": first_telegram_channel.discussion_group_chat_id,
+ "channel_name": first_telegram_channel.channel_name,
+ "discussion_group_name": first_telegram_channel.discussion_group_name,
+ "is_default_channel": False,
+ },
+ {
+ "id": second_telegram_channel.public_primary_key,
+ "channel_chat_id": second_telegram_channel.channel_chat_id,
+ "discussion_group_chat_id": second_telegram_channel.discussion_group_chat_id,
+ "channel_name": second_telegram_channel.channel_name,
+ "discussion_group_name": second_telegram_channel.discussion_group_name,
+ "is_default_channel": True,
+ },
+ ]
+
+ url = reverse("api-internal:telegram_channel-list")
+ response = client.get(url, **make_user_auth_headers(user, token))
+ assert response.status_code == status.HTTP_200_OK
+ assert response.json() == expected_payload
+
+
+@pytest.mark.django_db
+def test_get_telegram_channel(
+ make_telegram_channel, make_organization_and_user_with_plugin_token, make_user_auth_headers
+):
+ client = APIClient()
+
+ organization, user, token = make_organization_and_user_with_plugin_token()
+ telegram_channel = make_telegram_channel(organization=organization, is_default_channel=True)
+
+ expected_payload = {
+ "id": telegram_channel.public_primary_key,
+ "channel_chat_id": telegram_channel.channel_chat_id,
+ "discussion_group_chat_id": telegram_channel.discussion_group_chat_id,
+ "channel_name": telegram_channel.channel_name,
+ "discussion_group_name": telegram_channel.discussion_group_name,
+ "is_default_channel": True,
+ }
+
+ url = reverse("api-internal:telegram_channel-detail", kwargs={"pk": telegram_channel.public_primary_key})
+ response = client.get(url, **make_user_auth_headers(user, token))
+ assert response.status_code == status.HTTP_200_OK
+ assert response.json() == expected_payload
+
+
+@pytest.mark.django_db
+def test_delete_telegram_channel(
+ make_telegram_channel, make_organization_and_user_with_plugin_token, make_user_auth_headers
+):
+ client = APIClient()
+
+ organization, user, token = make_organization_and_user_with_plugin_token()
+ telegram_channel = make_telegram_channel(organization=organization, is_default_channel=True)
+
+ url = reverse("api-internal:telegram_channel-detail", kwargs={"pk": telegram_channel.public_primary_key})
+ response = client.delete(url, **make_user_auth_headers(user, token))
+ assert response.status_code == status.HTTP_204_NO_CONTENT
+
+ url = reverse("api-internal:telegram_channel-detail", kwargs={"pk": telegram_channel.public_primary_key})
+ response = client.get(url, **make_user_auth_headers(user, token))
+ assert response.status_code == status.HTTP_404_NOT_FOUND
+
+
+@pytest.mark.django_db
+def test_access_other_organizations_telegram_channels(
+ make_organization_and_user_with_plugin_token, make_telegram_channel, make_user_auth_headers
+):
+ client = APIClient()
+
+ organization, user, token = make_organization_and_user_with_plugin_token()
+ telegram_channel = make_telegram_channel(organization=organization)
+
+ other_organization, other_user, other_token = make_organization_and_user_with_plugin_token()
+
+ url = reverse("api-internal:telegram_channel-detail", kwargs={"pk": telegram_channel.public_primary_key})
+ response = client.get(url, **make_user_auth_headers(other_user, other_token))
+ assert response.status_code == status.HTTP_404_NOT_FOUND
+
+ url = reverse("api-internal:telegram_channel-detail", kwargs={"pk": telegram_channel.public_primary_key})
+ response = client.delete(url, **make_user_auth_headers(other_user, other_token))
+ assert response.status_code == status.HTTP_404_NOT_FOUND
+
+ url = reverse("api-internal:telegram_channel-list")
+ response = client.get(url, **make_user_auth_headers(other_user, other_token))
+ assert response.status_code == status.HTTP_200_OK
+ assert response.json() == []
+
+ url = reverse("api-internal:telegram_channel-set-default", kwargs={"pk": telegram_channel.public_primary_key})
+ response = client.post(url, **make_user_auth_headers(other_user, other_token))
+ assert response.status_code == status.HTTP_404_NOT_FOUND
+
+
+@pytest.mark.django_db
+def test_set_default(make_telegram_channel, make_organization_and_user_with_plugin_token, make_user_auth_headers):
+ client = APIClient()
+
+ organization, user, token = make_organization_and_user_with_plugin_token()
+ first_telegram_channel = make_telegram_channel(organization=organization, is_default_channel=True)
+ second_telegram_channel = make_telegram_channel(organization=organization)
+
+ url = reverse(
+ "api-internal:telegram_channel-set-default", kwargs={"pk": second_telegram_channel.public_primary_key}
+ )
+ response = client.post(url, **make_user_auth_headers(user, token))
+
+ assert response.status_code == status.HTTP_200_OK
+
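+    # Promoting a new default channel must demote the previous one.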
+ first_telegram_channel.refresh_from_db()
+ second_telegram_channel.refresh_from_db()
+
+ assert first_telegram_channel.is_default_channel is False
+ assert second_telegram_channel.is_default_channel is True
diff --git a/engine/apps/api/tests/test_terraform_renderer.py b/engine/apps/api/tests/test_terraform_renderer.py
new file mode 100644
index 0000000000..16e5f6542a
--- /dev/null
+++ b/engine/apps/api/tests/test_terraform_renderer.py
@@ -0,0 +1,25 @@
+import pytest
+from django.urls import reverse
+from rest_framework import status
+from rest_framework.test import APIClient
+
+
+@pytest.mark.django_db
+def test_get_terraform_file(
+ make_organization_and_user_with_plugin_token, make_user_auth_headers, make_escalation_chain
+):
+ organization, user, token = make_organization_and_user_with_plugin_token()
+ make_escalation_chain(organization)
+ client = APIClient()
+ url = reverse("api-internal:terraform_file")
+ response = client.get(url, format="text/plain", **make_user_auth_headers(user, token))
+ assert response.status_code == status.HTTP_200_OK
+
+
+@pytest.mark.django_db
+def test_get_terraform_imports(make_organization_and_user_with_plugin_token, make_user_auth_headers):
+ organization, user, token = make_organization_and_user_with_plugin_token()
+ client = APIClient()
+ url = reverse("api-internal:terraform_imports")
+ response = client.get(url, format="text/plain", **make_user_auth_headers(user, token))
+ assert response.status_code == status.HTTP_200_OK
diff --git a/engine/apps/api/tests/test_user.py b/engine/apps/api/tests/test_user.py
new file mode 100644
index 0000000000..5731ed17dd
--- /dev/null
+++ b/engine/apps/api/tests/test_user.py
@@ -0,0 +1,1486 @@
+from unittest.mock import Mock, patch
+
+import pytest
+from django.core.exceptions import ObjectDoesNotExist
+from django.urls import reverse
+from django.utils import timezone
+from rest_framework import status
+from rest_framework.response import Response
+from rest_framework.test import APIClient
+
+from apps.base.constants import ADMIN_PERMISSIONS, EDITOR_PERMISSIONS
+from apps.base.models import UserNotificationPolicy
+from common.constants.role import Role
+
+
+@pytest.mark.django_db
+def test_update_user(
+ make_organization,
+ make_team,
+ make_user_for_organization,
+ make_token_for_organization,
+ make_user_auth_headers,
+):
+ organization = make_organization()
+ admin = make_user_for_organization(organization)
+
+ team = make_team(organization)
+ team.users.add(admin)
+
+ _, token = make_token_for_organization(organization)
+
+ client = APIClient()
+ url = reverse("api-internal:user-detail", kwargs={"pk": admin.public_primary_key})
+ data = {
+ "unverified_phone_number": "+79123456789",
+ "current_team": team.public_primary_key,
+ }
+ response = client.put(url, data, format="json", **make_user_auth_headers(admin, token))
+ assert response.status_code == status.HTTP_200_OK
+ assert response.json()["unverified_phone_number"] == data["unverified_phone_number"]
+ assert response.json()["current_team"] == data["current_team"]
+
+
+@pytest.mark.django_db
+def test_update_user_cant_change_email_and_username(
+ make_organization,
+ make_user_for_organization,
+ make_token_for_organization,
+ make_user_auth_headers,
+):
+ organization = make_organization()
+ admin = make_user_for_organization(organization)
+ _, token = make_token_for_organization(organization)
+
+ client = APIClient()
+ url = reverse("api-internal:user-detail", kwargs={"pk": admin.public_primary_key})
+ phone_number = "+79123456789"
+ data = {
+ "unverified_phone_number": phone_number,
+ "email": "test@amixr.io",
+ "username": "bob",
+ }
+ expected_response = {
+ "pk": admin.public_primary_key,
+ "organization": {"pk": organization.public_primary_key, "name": organization.org_title},
+ "current_team": None,
+ "email": admin.email,
+ "username": admin.username,
+ "role": admin.role,
+ "unverified_phone_number": phone_number,
+ "verified_phone_number": None,
+ "telegram_configuration": None,
+ "messaging_backends": {
+ "TESTONLY": {
+ "user": admin.username,
+ }
+ },
+ "permissions": ADMIN_PERMISSIONS,
+ "notification_chain_verbal": {"default": "", "important": ""},
+ "slack_user_identity": None,
+ "avatar": admin.avatar_url,
+ }
+ response = client.put(url, data, format="json", **make_user_auth_headers(admin, token))
+ assert response.status_code == status.HTTP_200_OK
+ assert response.json() == expected_response
+
+
+@pytest.mark.django_db
+def test_list_users(
+ make_organization,
+ make_user_for_organization,
+ make_token_for_organization,
+ make_user_auth_headers,
+):
+ organization = make_organization()
+ admin = make_user_for_organization(organization)
+ editor = make_user_for_organization(organization, role=Role.EDITOR)
+ _, token = make_token_for_organization(organization)
+
+ client = APIClient()
+ url = reverse("api-internal:user-list")
+
+ expected_payload = {
+ "count": 2,
+ "next": None,
+ "previous": None,
+ "results": [
+ {
+ "pk": admin.public_primary_key,
+ "organization": {"pk": organization.public_primary_key, "name": organization.org_title},
+ "current_team": None,
+ "email": admin.email,
+ "username": admin.username,
+ "role": admin.role,
+ "unverified_phone_number": None,
+ "verified_phone_number": None,
+ "telegram_configuration": None,
+ "messaging_backends": {
+ "TESTONLY": {
+ "user": admin.username,
+ }
+ },
+ "permissions": ADMIN_PERMISSIONS,
+ "notification_chain_verbal": {"default": "", "important": ""},
+ "slack_user_identity": None,
+ "avatar": admin.avatar_url,
+ },
+ {
+ "pk": editor.public_primary_key,
+ "organization": {"pk": organization.public_primary_key, "name": organization.org_title},
+ "current_team": None,
+ "email": editor.email,
+ "username": editor.username,
+ "role": editor.role,
+ "unverified_phone_number": None,
+ "verified_phone_number": None,
+ "telegram_configuration": None,
+ "messaging_backends": {
+ "TESTONLY": {
+ "user": editor.username,
+ }
+ },
+ "permissions": EDITOR_PERMISSIONS,
+ "notification_chain_verbal": {"default": "", "important": ""},
+ "slack_user_identity": None,
+ "avatar": editor.avatar_url,
+ },
+ ],
+ }
+
+ response = client.get(url, format="json", **make_user_auth_headers(admin, token))
+
+ assert response.status_code == status.HTTP_200_OK
+ assert response.json() == expected_payload
+
+
+@pytest.mark.django_db
+def test_notification_chain_verbal(
+ make_organization,
+ make_user_for_organization,
+ make_token_for_organization,
+ make_user_auth_headers,
+ make_user_notification_policy,
+):
+ organization = make_organization()
+ admin = make_user_for_organization(organization)
+ _, token = make_token_for_organization(organization)
+
+ data_for_creation = [
+ {"step": UserNotificationPolicy.Step.NOTIFY, "notify_by": UserNotificationPolicy.NotificationChannel.SLACK},
+ {"step": UserNotificationPolicy.Step.WAIT, "wait_delay": timezone.timedelta(minutes=5)},
+ {
+ "step": UserNotificationPolicy.Step.NOTIFY,
+ "notify_by": UserNotificationPolicy.NotificationChannel.PHONE_CALL,
+ },
+ {"step": UserNotificationPolicy.Step.WAIT, "wait_delay": None},
+ {"step": UserNotificationPolicy.Step.NOTIFY, "notify_by": UserNotificationPolicy.NotificationChannel.TELEGRAM},
+ {"step": None},
+ {
+ "step": UserNotificationPolicy.Step.NOTIFY,
+ "notify_by": UserNotificationPolicy.NotificationChannel.SLACK,
+ "important": True,
+ },
+ {"step": UserNotificationPolicy.Step.WAIT, "wait_delay": timezone.timedelta(minutes=5), "important": True},
+ {
+ "step": UserNotificationPolicy.Step.NOTIFY,
+ "notify_by": UserNotificationPolicy.NotificationChannel.PHONE_CALL,
+ "important": True,
+ },
+ {"step": UserNotificationPolicy.Step.WAIT, "wait_delay": None, "important": True},
+ {
+ "step": UserNotificationPolicy.Step.NOTIFY,
+ "notify_by": UserNotificationPolicy.NotificationChannel.TELEGRAM,
+ "important": True,
+ },
+ ]
+
+ for data in data_for_creation:
+ make_user_notification_policy(admin, **data)
+
+ client = APIClient()
+ url = reverse("api-internal:user-detail", kwargs={"pk": admin.public_primary_key})
+
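+    # Policies with step=None or a WAIT without a delay add nothing to the verbal
+    # chain, so both variants render as "Slack - 5 min - ☎ - Telegram".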
+ expected_notification_chain = {
+ "default": "Slack - 5 min - \U0000260E - Telegram",
+ "important": "Slack - 5 min - \U0000260E - Telegram",
+ }
+
+ response = client.get(url, format="json", **make_user_auth_headers(admin, token))
+
+ assert response.status_code == status.HTTP_200_OK
+ assert response.json()["notification_chain_verbal"] == expected_notification_chain
+
+
+@pytest.mark.django_db
+@pytest.mark.parametrize(
+ "role,expected_status",
+ [
+ (Role.ADMIN, status.HTTP_200_OK),
+ (Role.EDITOR, status.HTTP_200_OK),
+ (Role.VIEWER, status.HTTP_403_FORBIDDEN),
+ ],
+)
+def test_user_update_self_permissions(
+ make_organization,
+ make_user_for_organization,
+ make_token_for_organization,
+ make_user_auth_headers,
+ role,
+ expected_status,
+):
+ organization = make_organization()
+ tester = make_user_for_organization(organization, role=role)
+ _, token = make_token_for_organization(organization)
+ client = APIClient()
+ url = reverse("api-internal:user-detail", kwargs={"pk": tester.public_primary_key})
+ with patch(
+ "apps.api.views.user.UserView.update",
+ return_value=Response(
+ status=status.HTTP_200_OK,
+ ),
+ ):
+ response = client.put(url, format="json", **make_user_auth_headers(tester, token))
+
+ assert response.status_code == expected_status
+
+
+@pytest.mark.django_db
+@pytest.mark.parametrize(
+ "role,expected_status",
+ [
+ (Role.ADMIN, status.HTTP_200_OK),
+ (Role.EDITOR, status.HTTP_403_FORBIDDEN),
+ (Role.VIEWER, status.HTTP_403_FORBIDDEN),
+ ],
+)
+def test_user_update_other_permissions(
+ make_organization,
+ make_user_for_organization,
+ make_token_for_organization,
+ make_user_auth_headers,
+ role,
+ expected_status,
+):
+ organization = make_organization()
+ admin = make_user_for_organization(organization)
+ tester = make_user_for_organization(organization, role=role)
+ _, token = make_token_for_organization(organization)
+
+ client = APIClient()
+ url = reverse("api-internal:user-detail", kwargs={"pk": admin.public_primary_key})
+ data = {"unverified_phone_number": "+79123456789"}
+
+ response = client.put(url, data, format="json", **make_user_auth_headers(tester, token))
+
+ assert response.status_code == expected_status
+
+
+@pytest.mark.django_db
+@pytest.mark.parametrize(
+ "role,expected_status",
+ [
+ (Role.ADMIN, status.HTTP_200_OK),
+ (Role.EDITOR, status.HTTP_200_OK),
+ (Role.VIEWER, status.HTTP_403_FORBIDDEN),
+ ],
+)
+def test_user_list_permissions(
+ make_organization,
+ make_user_for_organization,
+ make_token_for_organization,
+ make_user_auth_headers,
+ role,
+ expected_status,
+):
+ organization = make_organization()
+ tester = make_user_for_organization(organization, role=role)
+ _, token = make_token_for_organization(organization)
+
+ client = APIClient()
+ url = reverse("api-internal:user-list")
+ with patch(
+ "apps.api.views.user.UserView.list",
+ return_value=Response(
+ status=status.HTTP_200_OK,
+ ),
+ ):
+ response = client.get(url, format="json", **make_user_auth_headers(tester, token))
+
+ assert response.status_code == expected_status
+
+
+@pytest.mark.django_db
+@pytest.mark.parametrize(
+ "role,expected_status",
+ [
+ (Role.ADMIN, status.HTTP_200_OK),
+ (Role.EDITOR, status.HTTP_200_OK),
+ (Role.VIEWER, status.HTTP_200_OK),
+ ],
+)
+def test_user_detail_self_permissions(
+ make_organization,
+ make_user_for_organization,
+ make_token_for_organization,
+ make_user_auth_headers,
+ role,
+ expected_status,
+):
+ organization = make_organization()
+ tester = make_user_for_organization(organization, role=role)
+ _, token = make_token_for_organization(organization)
+
+ client = APIClient()
+ url = reverse("api-internal:user-detail", kwargs={"pk": tester.public_primary_key})
+ with patch(
+ "apps.api.views.user.UserView.list",
+ return_value=Response(
+ status=status.HTTP_200_OK,
+ ),
+ ):
+ response = client.get(url, format="json", **make_user_auth_headers(tester, token))
+
+ assert response.status_code == expected_status
+
+
+@pytest.mark.django_db
+@pytest.mark.parametrize(
+ "role,expected_status",
+ [
+ (Role.ADMIN, status.HTTP_200_OK),
+ (Role.EDITOR, status.HTTP_403_FORBIDDEN),
+ (Role.VIEWER, status.HTTP_403_FORBIDDEN),
+ ],
+)
+def test_user_detail_other_permissions(
+ make_organization,
+ make_user_for_organization,
+ make_token_for_organization,
+ make_user_auth_headers,
+ role,
+ expected_status,
+):
+ organization = make_organization()
+ admin = make_user_for_organization(organization)
+ tester = make_user_for_organization(organization, role=role)
+ _, token = make_token_for_organization(organization)
+
+ client = APIClient()
+ url = reverse("api-internal:user-detail", kwargs={"pk": admin.public_primary_key})
+ response = client.get(url, format="json", **make_user_auth_headers(tester, token))
+
+ assert response.status_code == expected_status
+
+
+@pytest.mark.django_db
+@pytest.mark.parametrize(
+ "role,expected_status",
+ [
+ (Role.ADMIN, status.HTTP_200_OK),
+ (Role.EDITOR, status.HTTP_200_OK),
+ (Role.VIEWER, status.HTTP_403_FORBIDDEN),
+ ],
+)
+def test_user_get_own_verification_code(
+ make_organization,
+ make_user_for_organization,
+ make_token_for_organization,
+ make_user_auth_headers,
+ role,
+ expected_status,
+):
+ organization = make_organization()
+ tester = make_user_for_organization(organization, role=role)
+ _, token = make_token_for_organization(organization)
+
+ client = APIClient()
+ url = reverse("api-internal:user-get-verification-code", kwargs={"pk": tester.public_primary_key})
+ with patch(
+ "apps.api.views.user.UserView.get_verification_code",
+ return_value=Response(
+ status=status.HTTP_200_OK,
+ ),
+ ):
+ response = client.get(url, format="json", **make_user_auth_headers(tester, token))
+
+ assert response.status_code == expected_status
+
+
+@pytest.mark.django_db
+@pytest.mark.parametrize(
+ "role,expected_status",
+ [
+ (Role.ADMIN, status.HTTP_200_OK),
+ (Role.EDITOR, status.HTTP_403_FORBIDDEN),
+ (Role.VIEWER, status.HTTP_403_FORBIDDEN),
+ ],
+)
+def test_user_get_other_verification_code(
+ make_organization,
+ make_user_for_organization,
+ make_token_for_organization,
+ make_user_auth_headers,
+ role,
+ expected_status,
+):
+ organization = make_organization()
+ admin = make_user_for_organization(organization)
+ tester = make_user_for_organization(organization, role=role)
+ _, token = make_token_for_organization(organization)
+
+ client = APIClient()
+ url = reverse("api-internal:user-get-verification-code", kwargs={"pk": admin.public_primary_key})
+ with patch("apps.twilioapp.phone_manager.PhoneManager.send_verification_code", return_value=Mock()):
+ response = client.get(url, format="json", **make_user_auth_headers(tester, token))
+
+ assert response.status_code == expected_status
+
+
+@pytest.mark.django_db
+@pytest.mark.parametrize(
+ "role,expected_status",
+ [
+ (Role.ADMIN, status.HTTP_200_OK),
+ (Role.EDITOR, status.HTTP_200_OK),
+ (Role.VIEWER, status.HTTP_403_FORBIDDEN),
+ ],
+)
+def test_user_verify_own_phone(
+ make_organization,
+ make_user_for_organization,
+ make_token_for_organization,
+ make_user_auth_headers,
+ role,
+ expected_status,
+):
+ organization = make_organization()
+ tester = make_user_for_organization(organization, role=role)
+ _, token = make_token_for_organization(organization)
+
+ client = APIClient()
+ url = reverse("api-internal:user-verify-number", kwargs={"pk": tester.public_primary_key})
+ with patch(
+ "apps.api.views.user.UserView.verify_number",
+ return_value=Response(
+ status=status.HTTP_200_OK,
+ ),
+ ):
+ response = client.put(url, format="json", **make_user_auth_headers(tester, token))
+
+ assert response.status_code == expected_status
+
+
+"""
+Tests below are outdated
+"""
+
+
+@pytest.mark.django_db
+@pytest.mark.parametrize(
+ "role,expected_status",
+ [
+ (Role.ADMIN, status.HTTP_200_OK),
+ (Role.EDITOR, status.HTTP_403_FORBIDDEN),
+ (Role.VIEWER, status.HTTP_403_FORBIDDEN),
+ ],
+)
+def test_user_verify_another_phone(
+ make_organization,
+ make_user_for_organization,
+ make_token_for_organization,
+ make_user_auth_headers,
+ role,
+ expected_status,
+):
+ organization = make_organization()
+ tester = make_user_for_organization(organization, role=role)
+ other_user = make_user_for_organization(organization, role=Role.EDITOR)
+ _, token = make_token_for_organization(organization)
+
+ client = APIClient()
+ url = reverse("api-internal:user-verify-number", kwargs={"pk": other_user.public_primary_key})
+
+ with patch("apps.twilioapp.phone_manager.PhoneManager.verify_phone_number", return_value=(True, None)):
+ response = client.put(f"{url}?token=12345", format="json", **make_user_auth_headers(tester, token))
+
+ assert response.status_code == expected_status
+
+
+@pytest.mark.django_db
+@pytest.mark.parametrize(
+ "role,expected_status",
+ [
+ (Role.ADMIN, status.HTTP_200_OK),
+ (Role.EDITOR, status.HTTP_200_OK),
+ (Role.VIEWER, status.HTTP_403_FORBIDDEN),
+ ],
+)
+def test_user_get_own_telegram_verification_code(
+ make_organization,
+ make_user_for_organization,
+ make_token_for_organization,
+ make_user_auth_headers,
+ role,
+ expected_status,
+):
+ organization = make_organization()
+ tester = make_user_for_organization(organization, role=role)
+ _, token = make_token_for_organization(organization)
+
+ client = APIClient()
+ url = reverse("api-internal:user-get-telegram-verification-code", kwargs={"pk": tester.public_primary_key})
+
+ response = client.get(url, format="json", **make_user_auth_headers(tester, token))
+ assert response.status_code == expected_status
+
+
+@pytest.mark.django_db
+@pytest.mark.parametrize(
+ "role,expected_status",
+ [
+ (Role.ADMIN, status.HTTP_200_OK),
+ (Role.EDITOR, status.HTTP_403_FORBIDDEN),
+ (Role.VIEWER, status.HTTP_403_FORBIDDEN),
+ ],
+)
+def test_user_get_another_telegram_verification_code(
+ make_organization,
+ make_user_for_organization,
+ make_token_for_organization,
+ make_user_auth_headers,
+ role,
+ expected_status,
+):
+ organization = make_organization()
+ tester = make_user_for_organization(organization, role=role)
+ other_user = make_user_for_organization(organization, role=Role.EDITOR)
+ _, token = make_token_for_organization(organization)
+
+ client = APIClient()
+ url = reverse("api-internal:user-get-telegram-verification-code", kwargs={"pk": other_user.public_primary_key})
+
+ response = client.get(url, format="json", **make_user_auth_headers(tester, token))
+ assert response.status_code == expected_status
+
+
+@pytest.mark.django_db
+def test_admin_can_update_user(
+ make_organization, make_user_for_organization, make_token_for_organization, make_user_auth_headers
+):
+ organization = make_organization()
+ tester = make_user_for_organization(organization, role=Role.ADMIN)
+ other_user = make_user_for_organization(organization, role=Role.EDITOR)
+ _, token = make_token_for_organization(organization)
+
+ client = APIClient()
+ data = {
+ "email": "test@amixr.io",
+ "role": Role.ADMIN,
+ "username": "updated_test_username",
+ "unverified_phone_number": "+1234567890",
+ "slack_login": "",
+ }
+ url = reverse("api-internal:user-detail", kwargs={"pk": other_user.public_primary_key})
+ response = client.put(url, format="json", data=data, **make_user_auth_headers(tester, token))
+
+ assert response.status_code == status.HTTP_200_OK
+
+
+@pytest.mark.django_db
+def test_admin_can_update_himself(
+ make_organization, make_user_for_organization, make_token_for_organization, make_user_auth_headers
+):
+ organization = make_organization()
+ admin = make_user_for_organization(organization, role=Role.ADMIN)
+ _, token = make_token_for_organization(organization)
+
+ client = APIClient()
+ data = {
+ "email": "test@amixr.io",
+ "role": Role.ADMIN,
+ "username": "updated_test_username",
+ "unverified_phone_number": "+1234567890",
+ "slack_login": "",
+ }
+
+ url = reverse("api-internal:user-detail", kwargs={"pk": admin.public_primary_key})
+ response = client.put(url, format="json", data=data, **make_user_auth_headers(admin, token))
+
+ assert response.status_code == status.HTTP_200_OK
+
+
+@pytest.mark.django_db
+def test_admin_can_list_users(
+ make_organization, make_user_for_organization, make_token_for_organization, make_user_auth_headers
+):
+ organization = make_organization()
+ admin = make_user_for_organization(organization, role=Role.ADMIN)
+ _, token = make_token_for_organization(organization)
+
+ client = APIClient()
+
+ url = reverse("api-internal:user-list")
+ response = client.get(url, format="json", **make_user_auth_headers(admin, token))
+
+ assert response.status_code == status.HTTP_200_OK
+
+
+@pytest.mark.django_db
+def test_admin_can_detail_users(
+ make_organization, make_user_for_organization, make_token_for_organization, make_user_auth_headers
+):
+ organization = make_organization()
+ admin = make_user_for_organization(organization, role=Role.ADMIN)
+ editor = make_user_for_organization(organization, role=Role.EDITOR)
+ _, token = make_token_for_organization(organization)
+
+ client = APIClient()
+
+ url = reverse("api-internal:user-detail", kwargs={"pk": editor.public_primary_key})
+ response = client.get(url, format="json", **make_user_auth_headers(admin, token))
+
+ assert response.status_code == status.HTTP_200_OK
+
+
+@patch("apps.twilioapp.phone_manager.PhoneManager.send_verification_code", return_value=Mock())
+@pytest.mark.django_db
+def test_admin_can_get_own_verification_code(
+ mock_verification_start,
+ make_organization,
+ make_user_for_organization,
+ make_token_for_organization,
+ make_user_auth_headers,
+):
+ organization = make_organization()
+ admin = make_user_for_organization(organization, role=Role.ADMIN)
+ _, token = make_token_for_organization(organization)
+
+ client = APIClient()
+ url = reverse("api-internal:user-get-verification-code", kwargs={"pk": admin.public_primary_key})
+
+ response = client.get(url, format="json", **make_user_auth_headers(admin, token))
+ assert response.status_code == status.HTTP_200_OK
+
+
+@patch("apps.twilioapp.phone_manager.PhoneManager.send_verification_code", return_value=Mock())
+@pytest.mark.django_db
+def test_admin_can_get_another_user_verification_code(
+ mock_verification_start,
+ make_organization,
+ make_user_for_organization,
+ make_token_for_organization,
+ make_user_auth_headers,
+):
+ organization = make_organization()
+ admin = make_user_for_organization(organization, role=Role.ADMIN)
+ editor = make_user_for_organization(organization, role=Role.EDITOR)
+ _, token = make_token_for_organization(organization)
+
+ client = APIClient()
+ url = reverse("api-internal:user-get-verification-code", kwargs={"pk": editor.public_primary_key})
+ response = client.get(url, format="json", **make_user_auth_headers(admin, token))
+ assert response.status_code == status.HTTP_200_OK
+
+
+@patch("apps.twilioapp.phone_manager.PhoneManager.verify_phone_number", return_value=(True, None))
+@pytest.mark.django_db
+def test_admin_can_verify_own_phone(
+ mocked_verification_check,
+ make_organization,
+ make_user_for_organization,
+ make_token_for_organization,
+ make_user_auth_headers,
+):
+ organization = make_organization()
+ admin = make_user_for_organization(organization, role=Role.ADMIN)
+ _, token = make_token_for_organization(organization)
+
+ client = APIClient()
+ url = reverse("api-internal:user-verify-number", kwargs={"pk": admin.public_primary_key})
+
+ response = client.put(f"{url}?token=12345", format="json", **make_user_auth_headers(admin, token))
+ assert response.status_code == status.HTTP_200_OK
+
+
+@patch("apps.twilioapp.phone_manager.PhoneManager.verify_phone_number", return_value=(True, None))
+@pytest.mark.django_db
+def test_admin_can_verify_another_user_phone(
+ mocked_verification_check,
+ make_organization,
+ make_user_for_organization,
+ make_token_for_organization,
+ make_user_auth_headers,
+):
+ organization = make_organization()
+ admin = make_user_for_organization(organization, role=Role.ADMIN)
+ editor = make_user_for_organization(organization, role=Role.EDITOR)
+ _, token = make_token_for_organization(organization)
+
+ client = APIClient()
+ url = reverse("api-internal:user-verify-number", kwargs={"pk": editor.public_primary_key})
+
+ response = client.put(f"{url}?token=12345", format="json", **make_user_auth_headers(admin, token))
+ assert response.status_code == status.HTTP_200_OK
+
+
+@pytest.mark.django_db
+def test_admin_can_get_own_telegram_verification_code(
+ make_organization, make_user_for_organization, make_token_for_organization, make_user_auth_headers
+):
+ organization = make_organization()
+ admin = make_user_for_organization(organization, role=Role.ADMIN)
+ _, token = make_token_for_organization(organization)
+
+ client = APIClient()
+ url = reverse("api-internal:user-get-telegram-verification-code", kwargs={"pk": admin.public_primary_key})
+
+ response = client.get(url, format="json", **make_user_auth_headers(admin, token))
+ assert response.status_code == status.HTTP_200_OK
+
+
+@pytest.mark.django_db
+def test_admin_can_get_another_user_telegram_verification_code(
+ make_organization, make_user_for_organization, make_token_for_organization, make_user_auth_headers
+):
+ organization = make_organization()
+ admin = make_user_for_organization(organization, role=Role.ADMIN)
+ editor = make_user_for_organization(organization, role=Role.EDITOR)
+ _, token = make_token_for_organization(organization)
+
+ client = APIClient()
+ url = reverse("api-internal:user-get-telegram-verification-code", kwargs={"pk": editor.public_primary_key})
+
+ response = client.get(url, format="json", **make_user_auth_headers(admin, token))
+ assert response.status_code == status.HTTP_200_OK
+
+
+@pytest.mark.django_db
+def test_admin_can_get_another_user_backend_verification_code(
+ make_organization, make_user_for_organization, make_token_for_organization, make_user_auth_headers
+):
+ organization = make_organization()
+ admin = make_user_for_organization(organization, role=Role.ADMIN)
+ editor = make_user_for_organization(organization, role=Role.EDITOR)
+ _, token = make_token_for_organization(organization)
+
+ client = APIClient()
+ url = (
+ reverse("api-internal:user-get-backend-verification-code", kwargs={"pk": editor.public_primary_key})
+ + "?backend=TESTONLY"
+ )
+
+ response = client.get(url, format="json", **make_user_auth_headers(admin, token))
+ assert response.status_code == status.HTTP_200_OK
+
+
+@pytest.mark.django_db
+def test_admin_can_unlink_another_user_backend_account(
+ make_organization, make_user_for_organization, make_token_for_organization, make_user_auth_headers
+):
+ organization = make_organization()
+ admin = make_user_for_organization(organization, role=Role.ADMIN)
+ editor = make_user_for_organization(organization, role=Role.EDITOR)
+ _, token = make_token_for_organization(organization)
+
+ client = APIClient()
+ url = reverse("api-internal:user-unlink-backend", kwargs={"pk": editor.public_primary_key}) + "?backend=TESTONLY"
+
+ response = client.post(url, format="json", **make_user_auth_headers(admin, token))
+ assert response.status_code == status.HTTP_200_OK
+
+
+"""Test user permissions"""
+
+
+@pytest.mark.django_db
+def test_user_cant_update_user(
+ make_organization, make_user_for_organization, make_token_for_organization, make_user_auth_headers
+):
+ organization = make_organization()
+ first_user = make_user_for_organization(organization, role=Role.EDITOR)
+ second_user = make_user_for_organization(organization, role=Role.EDITOR)
+ _, token = make_token_for_organization(organization)
+
+ client = APIClient()
+ data = {
+ "email": "test@amixr.io",
+ "role": Role.ADMIN,
+ "username": "updated_test_username",
+ "unverified_phone_number": "+1234567890",
+ "slack_login": "",
+ }
+ url = reverse("api-internal:user-detail", kwargs={"pk": first_user.public_primary_key})
+ response = client.put(url, format="json", data=data, **make_user_auth_headers(second_user, token))
+
+ assert response.status_code == status.HTTP_403_FORBIDDEN
+
+
+@pytest.mark.django_db
+def test_user_can_update_themself(
+ make_organization, make_user_for_organization, make_token_for_organization, make_user_auth_headers
+):
+ organization = make_organization()
+ user = make_user_for_organization(organization, role=Role.EDITOR)
+ _, token = make_token_for_organization(organization)
+
+ client = APIClient()
+ data = {
+ "email": "test@amixr.io",
+ "role": Role.EDITOR,
+ "username": "updated_test_username",
+ "unverified_phone_number": "+1234567890",
+ "slack_login": "",
+ }
+
+ url = reverse("api-internal:user-detail", kwargs={"pk": user.public_primary_key})
+ response = client.put(url, format="json", data=data, **make_user_auth_headers(user, token))
+
+ assert response.status_code == status.HTTP_200_OK
+
+
+@pytest.mark.django_db
+def test_user_can_list_users(
+ make_organization, make_user_for_organization, make_token_for_organization, make_user_auth_headers
+):
+ organization = make_organization()
+ editor = make_user_for_organization(organization, role=Role.EDITOR)
+ _, token = make_token_for_organization(organization)
+
+ client = APIClient()
+
+ url = reverse("api-internal:user-list")
+ response = client.get(url, format="json", **make_user_auth_headers(editor, token))
+
+ assert response.status_code == status.HTTP_200_OK
+
+
+@pytest.mark.django_db
+def test_user_can_detail_users(
+ make_organization, make_user_for_organization, make_token_for_organization, make_user_auth_headers
+):
+ organization = make_organization()
+ admin = make_user_for_organization(organization, role=Role.ADMIN)
+ editor = make_user_for_organization(organization, role=Role.EDITOR)
+ _, token = make_token_for_organization(organization)
+
+ client = APIClient()
+ url = reverse("api-internal:user-detail", kwargs={"pk": admin.public_primary_key})
+
+ response = client.get(url, format="json", **make_user_auth_headers(editor, token))
+ assert response.status_code == status.HTTP_403_FORBIDDEN
+
+
+@patch("apps.twilioapp.phone_manager.PhoneManager.send_verification_code", return_value=Mock())
+@pytest.mark.django_db
+def test_user_can_get_own_verification_code(
+ mock_verification_start,
+ make_organization,
+ make_user_for_organization,
+ make_token_for_organization,
+ make_user_auth_headers,
+):
+ organization = make_organization()
+ user = make_user_for_organization(organization, role=Role.EDITOR)
+ _, token = make_token_for_organization(organization)
+
+ client = APIClient()
+ url = reverse("api-internal:user-get-verification-code", kwargs={"pk": user.public_primary_key})
+
+ response = client.get(f"{url}", format="json", **make_user_auth_headers(user, token))
+ assert response.status_code == status.HTTP_200_OK
+
+
+@patch("apps.twilioapp.phone_manager.PhoneManager.send_verification_code", return_value=Mock())
+@pytest.mark.django_db
+def test_user_cant_get_another_user_verification_code(
+ mock_verification_start,
+ make_organization,
+ make_user_for_organization,
+ make_token_for_organization,
+ make_user_auth_headers,
+):
+ organization = make_organization()
+ first_user = make_user_for_organization(organization, role=Role.EDITOR)
+ second_user = make_user_for_organization(organization, role=Role.EDITOR)
+ _, token = make_token_for_organization(organization)
+
+ client = APIClient()
+ url = reverse("api-internal:user-get-verification-code", kwargs={"pk": first_user.public_primary_key})
+
+ response = client.get(f"{url}", format="json", **make_user_auth_headers(second_user, token))
+ assert response.status_code == status.HTTP_403_FORBIDDEN
+
+
+@patch("apps.twilioapp.phone_manager.PhoneManager.verify_phone_number", return_value=(True, None))
+@pytest.mark.django_db
+def test_user_can_verify_own_phone(
+ mocked_verification_check,
+ make_organization,
+ make_user_for_organization,
+ make_token_for_organization,
+ make_user_auth_headers,
+):
+ organization = make_organization()
+ user = make_user_for_organization(organization, role=Role.EDITOR)
+ _, token = make_token_for_organization(organization)
+
+ client = APIClient()
+ url = reverse("api-internal:user-verify-number", kwargs={"pk": user.public_primary_key})
+
+ response = client.put(f"{url}?token=12345", format="json", **make_user_auth_headers(user, token))
+ assert response.status_code == status.HTTP_200_OK
+
+
+@patch("apps.twilioapp.phone_manager.PhoneManager.verify_phone_number", return_value=(True, None))
+@pytest.mark.django_db
+def test_user_cant_verify_another_user_phone(
+ mocked_verification_check,
+ make_organization,
+ make_user_for_organization,
+ make_token_for_organization,
+ make_user_auth_headers,
+):
+ organization = make_organization()
+ first_user = make_user_for_organization(organization, role=Role.EDITOR)
+ second_user = make_user_for_organization(organization, role=Role.EDITOR)
+ _, token = make_token_for_organization(organization)
+
+ client = APIClient()
+ url = reverse("api-internal:user-verify-number", kwargs={"pk": first_user.public_primary_key})
+
+ response = client.put(f"{url}?token=12345", format="json", **make_user_auth_headers(second_user, token))
+ assert response.status_code == status.HTTP_403_FORBIDDEN
+
+
+@pytest.mark.django_db
+def test_user_can_get_own_telegram_verification_code(
+ make_organization, make_user_for_organization, make_token_for_organization, make_user_auth_headers
+):
+ organization = make_organization()
+ user = make_user_for_organization(organization, role=Role.EDITOR)
+ _, token = make_token_for_organization(organization)
+
+ client = APIClient()
+ url = reverse("api-internal:user-get-telegram-verification-code", kwargs={"pk": user.public_primary_key})
+
+    response = client.get(url, format="json", **make_user_auth_headers(user, token))
+ assert response.status_code == status.HTTP_200_OK
+
+
+@pytest.mark.django_db
+def test_user_cant_get_another_user_telegram_verification_code(
+ make_organization, make_user_for_organization, make_token_for_organization, make_user_auth_headers
+):
+ organization = make_organization()
+ first_user = make_user_for_organization(organization, role=Role.EDITOR)
+ second_user = make_user_for_organization(organization, role=Role.EDITOR)
+ _, token = make_token_for_organization(organization)
+
+ client = APIClient()
+ url = reverse("api-internal:user-get-telegram-verification-code", kwargs={"pk": first_user.public_primary_key})
+
+    response = client.get(url, format="json", **make_user_auth_headers(second_user, token))
+ assert response.status_code == status.HTTP_403_FORBIDDEN
+
+
+@pytest.mark.django_db
+def test_user_can_get_own_backend_verification_code(
+ make_organization, make_user_for_organization, make_token_for_organization, make_user_auth_headers
+):
+ organization = make_organization()
+ user = make_user_for_organization(organization, role=Role.EDITOR)
+ _, token = make_token_for_organization(organization)
+
+ client = APIClient()
+ url = (
+ reverse("api-internal:user-get-backend-verification-code", kwargs={"pk": user.public_primary_key})
+ + "?backend=TESTONLY"
+ )
+
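+    # "TESTONLY" maps to TestOnlyBackend, the messaging backend patched below.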
+ with patch(
+ "apps.base.tests.messaging_backend.TestOnlyBackend.generate_user_verification_code",
+ return_value="the-code",
+ ) as mock_generate_code:
+        response = client.get(url, format="json", **make_user_auth_headers(user, token))
+
+ assert response.status_code == status.HTTP_200_OK
+ assert response.json() == "the-code"
+ mock_generate_code.assert_called_once_with(user)
+
+
+@pytest.mark.django_db
+def test_user_cant_get_another_user_backend_verification_code(
+ make_organization, make_user_for_organization, make_token_for_organization, make_user_auth_headers
+):
+ organization = make_organization()
+ first_user = make_user_for_organization(organization, role=Role.EDITOR)
+ second_user = make_user_for_organization(organization, role=Role.EDITOR)
+ _, token = make_token_for_organization(organization)
+
+ client = APIClient()
+ url = (
+ reverse("api-internal:user-get-backend-verification-code", kwargs={"pk": first_user.public_primary_key})
+ + "?backend=TESTONLY"
+ )
+
+    response = client.get(url, format="json", **make_user_auth_headers(second_user, token))
+ assert response.status_code == status.HTTP_403_FORBIDDEN
+
+
+@pytest.mark.django_db
+def test_user_can_unlink_backend_own_account(
+ make_organization, make_user_for_organization, make_token_for_organization, make_user_auth_headers
+):
+ organization = make_organization()
+ user = make_user_for_organization(organization, role=Role.EDITOR)
+ _, token = make_token_for_organization(organization)
+
+ client = APIClient()
+ url = reverse("api-internal:user-unlink-backend", kwargs={"pk": user.public_primary_key}) + "?backend=TESTONLY"
+
+    response = client.post(url, format="json", **make_user_auth_headers(user, token))
+
+ assert response.status_code == status.HTTP_200_OK
+
+
+@pytest.mark.django_db
+def test_user_unlink_backend_invalid_backend_id(
+ make_organization, make_user_for_organization, make_token_for_organization, make_user_auth_headers
+):
+ organization = make_organization()
+ user = make_user_for_organization(organization, role=Role.EDITOR)
+ _, token = make_token_for_organization(organization)
+
+ client = APIClient()
+ url = reverse("api-internal:user-unlink-backend", kwargs={"pk": user.public_primary_key}) + "?backend=INVALID"
+
+    response = client.post(url, format="json", **make_user_auth_headers(user, token))
+
+ assert response.status_code == status.HTTP_400_BAD_REQUEST
+
+
+@pytest.mark.django_db
+def test_user_unlink_backend_backend_account_not_found(
+ make_organization, make_user_for_organization, make_token_for_organization, make_user_auth_headers
+):
+ organization = make_organization()
+ user = make_user_for_organization(organization, role=Role.EDITOR)
+ _, token = make_token_for_organization(organization)
+
+ client = APIClient()
+ url = reverse("api-internal:user-unlink-backend", kwargs={"pk": user.public_primary_key}) + "?backend=TESTONLY"
+ with patch("apps.base.tests.messaging_backend.TestOnlyBackend.unlink_user", side_effect=ObjectDoesNotExist):
+        response = client.post(url, format="json", **make_user_auth_headers(user, token))
+
+ assert response.status_code == status.HTTP_400_BAD_REQUEST
+
+
+@pytest.mark.django_db
+def test_user_cant_unlink_backend_another_user(
+ make_organization, make_user_for_organization, make_token_for_organization, make_user_auth_headers
+):
+ organization = make_organization()
+ first_user = make_user_for_organization(organization, role=Role.EDITOR)
+ second_user = make_user_for_organization(organization, role=Role.EDITOR)
+ _, token = make_token_for_organization(organization)
+
+ client = APIClient()
+ url = (
+ reverse("api-internal:user-unlink-backend", kwargs={"pk": first_user.public_primary_key}) + "?backend=TESTONLY"
+ )
+
+ response = client.post(f"{url}", format="json", **make_user_auth_headers(second_user, token))
+ assert response.status_code == status.HTTP_403_FORBIDDEN
+
+
+"""Test stakeholder permissions"""
+
+
+@pytest.mark.django_db
+def test_viewer_cant_create_user(
+ make_organization, make_user_for_organization, make_token_for_organization, make_user_auth_headers
+):
+ organization = make_organization()
+ user = make_user_for_organization(organization, role=Role.VIEWER)
+ _, token = make_token_for_organization(organization)
+
+ client = APIClient()
+ url = reverse("api-internal:user-list")
+ data = {
+ "email": "test@amixr.io",
+ "role": Role.ADMIN,
+ "username": "test_username",
+ "unverified_phone_number": None,
+ "slack_login": "",
+ }
+ response = client.post(url, format="json", data=data, **make_user_auth_headers(user, token))
+
+ assert response.status_code == status.HTTP_403_FORBIDDEN
+
+
+@pytest.mark.django_db
+def test_viewer_cant_update_user(
+ make_organization, make_user_for_organization, make_token_for_organization, make_user_auth_headers
+):
+ organization = make_organization()
+ first_user = make_user_for_organization(organization, role=Role.EDITOR)
+ second_user = make_user_for_organization(organization, role=Role.VIEWER)
+ _, token = make_token_for_organization(organization)
+
+ data = {
+ "email": "test@amixr.io",
+ "role": Role.EDITOR,
+ "username": "updated_test_username",
+ "unverified_phone_number": "+1234567890",
+ "slack_login": "",
+ }
+
+ client = APIClient()
+ url = reverse("api-internal:user-detail", kwargs={"pk": first_user.public_primary_key})
+ response = client.put(url, format="json", data=data, **make_user_auth_headers(second_user, token))
+
+ assert response.status_code == status.HTTP_403_FORBIDDEN
+
+
+@pytest.mark.django_db
+def test_viewer_cant_update_himself(
+ make_organization, make_user_for_organization, make_token_for_organization, make_user_auth_headers
+):
+ organization = make_organization()
+ user = make_user_for_organization(organization, role=Role.VIEWER)
+ _, token = make_token_for_organization(organization)
+
+ data = {
+ "email": "test@amixr.io",
+ "role": Role.VIEWER,
+ "username": "updated_test_username",
+ "unverified_phone_number": "+1234567890",
+ "slack_login": "",
+ }
+
+ client = APIClient()
+ url = reverse("api-internal:user-detail", kwargs={"pk": user.public_primary_key})
+ response = client.put(url, format="json", data=data, **make_user_auth_headers(user, token))
+
+ assert response.status_code == status.HTTP_403_FORBIDDEN
+
+
+@pytest.mark.django_db
+def test_viewer_cant_list_users(
+ make_organization, make_user_for_organization, make_token_for_organization, make_user_auth_headers
+):
+ organization = make_organization()
+ user = make_user_for_organization(organization, role=Role.VIEWER)
+ _, token = make_token_for_organization(organization)
+
+ client = APIClient()
+ url = reverse("api-internal:user-list")
+
+ response = client.get(url, format="json", **make_user_auth_headers(user, token))
+ assert response.status_code == status.HTTP_403_FORBIDDEN
+
+
+@pytest.mark.django_db
+def test_viewer_cant_detail_users(
+ make_organization, make_user_for_organization, make_token_for_organization, make_user_auth_headers
+):
+ organization = make_organization()
+ first_user = make_user_for_organization(organization, role=Role.EDITOR)
+ second_user = make_user_for_organization(organization, role=Role.VIEWER)
+ _, token = make_token_for_organization(organization)
+
+ client = APIClient()
+ url = reverse("api-internal:user-detail", kwargs={"pk": first_user.public_primary_key})
+ response = client.get(url, format="json", **make_user_auth_headers(second_user, token))
+
+ assert response.status_code == status.HTTP_403_FORBIDDEN
+
+
+@patch("apps.twilioapp.phone_manager.PhoneManager.send_verification_code", return_value=Mock())
+@pytest.mark.django_db
+def test_viewer_cant_get_own_verification_code(
+ mock_verification_start,
+ make_organization,
+ make_user_for_organization,
+ make_token_for_organization,
+ make_user_auth_headers,
+):
+ organization = make_organization()
+ user = make_user_for_organization(organization, role=Role.VIEWER)
+ _, token = make_token_for_organization(organization)
+
+ client = APIClient()
+ url = reverse("api-internal:user-get-verification-code", kwargs={"pk": user.public_primary_key})
+
+ response = client.get(url, format="json", **make_user_auth_headers(user, token))
+ assert response.status_code == status.HTTP_403_FORBIDDEN
+
+
+@patch("apps.twilioapp.phone_manager.PhoneManager.send_verification_code", return_value=Mock())
+@pytest.mark.django_db
+def test_viewer_cant_get_another_user_verification_code(
+ mock_verification_start,
+ make_organization,
+ make_user_for_organization,
+ make_token_for_organization,
+ make_user_auth_headers,
+):
+ organization = make_organization()
+ first_user = make_user_for_organization(organization, role=Role.EDITOR)
+ second_user = make_user_for_organization(organization, role=Role.VIEWER)
+ _, token = make_token_for_organization(organization)
+
+ client = APIClient()
+ url = reverse("api-internal:user-get-verification-code", kwargs={"pk": first_user.public_primary_key})
+
+ response = client.get(url, format="json", **make_user_auth_headers(second_user, token))
+ assert response.status_code == status.HTTP_403_FORBIDDEN
+
+
+@patch("apps.twilioapp.phone_manager.PhoneManager.verify_phone_number", return_value=(True, None))
+@pytest.mark.django_db
+def test_viewer_cant_verify_own_phone(
+ mocked_verification_check,
+ make_organization,
+ make_user_for_organization,
+ make_token_for_organization,
+ make_user_auth_headers,
+):
+ organization = make_organization()
+ user = make_user_for_organization(organization, role=Role.VIEWER)
+ _, token = make_token_for_organization(organization)
+
+ client = APIClient()
+ url = reverse("api-internal:user-verify-number", kwargs={"pk": user.public_primary_key})
+
+ response = client.put(f"{url}?token=12345", format="json", **make_user_auth_headers(user, token))
+ assert response.status_code == status.HTTP_403_FORBIDDEN
+
+
+@patch("apps.twilioapp.phone_manager.PhoneManager.verify_phone_number", return_value=(True, None))
+@pytest.mark.django_db
+def test_viewer_cant_verify_another_user_phone(
+ mocked_verification_check,
+ make_organization,
+ make_user_for_organization,
+ make_token_for_organization,
+ make_user_auth_headers,
+):
+ organization = make_organization()
+ first_user = make_user_for_organization(organization, role=Role.EDITOR)
+ second_user = make_user_for_organization(organization, role=Role.VIEWER)
+ _, token = make_token_for_organization(organization)
+
+ client = APIClient()
+ url = reverse("api-internal:user-verify-number", kwargs={"pk": first_user.public_primary_key})
+
+ response = client.put(f"{url}?token=12345", format="json", **make_user_auth_headers(second_user, token))
+ assert response.status_code == status.HTTP_403_FORBIDDEN
+
+
+@pytest.mark.django_db
+def test_viewer_cant_get_own_telegram_verification_code(
+ make_organization, make_user_for_organization, make_token_for_organization, make_user_auth_headers
+):
+ organization = make_organization()
+ user = make_user_for_organization(organization, role=Role.VIEWER)
+ _, token = make_token_for_organization(organization)
+
+ client = APIClient()
+ url = reverse("api-internal:user-get-telegram-verification-code", kwargs={"pk": user.public_primary_key})
+
+    response = client.get(url, format="json", **make_user_auth_headers(user, token))
+ assert response.status_code == status.HTTP_403_FORBIDDEN
+
+
+@pytest.mark.django_db
+def test_viewer_cant_get_another_user_telegram_verification_code(
+ make_organization, make_user_for_organization, make_token_for_organization, make_user_auth_headers
+):
+ organization = make_organization()
+ first_user = make_user_for_organization(organization, role=Role.EDITOR)
+ second_user = make_user_for_organization(organization, role=Role.VIEWER)
+ _, token = make_token_for_organization(organization)
+
+ client = APIClient()
+ url = reverse("api-internal:user-get-telegram-verification-code", kwargs={"pk": first_user.public_primary_key})
+
+ response = client.get(url, format="json", **make_user_auth_headers(second_user, token))
+ assert response.status_code == status.HTTP_403_FORBIDDEN
+
+
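+# Admins and editors may clear their own phone number; viewers may not.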
+@pytest.mark.django_db
+@pytest.mark.parametrize(
+ "role,expected_status,initial_unverified_number,initial_verified_number",
+ [
+ (Role.ADMIN, status.HTTP_200_OK, "+1234567890", None),
+ (Role.EDITOR, status.HTTP_200_OK, "+1234567890", None),
+ (Role.VIEWER, status.HTTP_403_FORBIDDEN, "+1234567890", None),
+ (Role.ADMIN, status.HTTP_200_OK, None, "+1234567890"),
+ (Role.EDITOR, status.HTTP_200_OK, None, "+1234567890"),
+ (Role.VIEWER, status.HTTP_403_FORBIDDEN, None, "+1234567890"),
+ ],
+)
+def test_forget_own_number(
+ make_organization,
+ make_team,
+ make_user_for_organization,
+ make_token_for_organization,
+ make_user_auth_headers,
+ role,
+ expected_status,
+ initial_unverified_number,
+ initial_verified_number,
+):
+ organization = make_organization()
+ admin = make_user_for_organization(organization, role=Role.ADMIN)
+ user = make_user_for_organization(
+ organization,
+ role=role,
+ unverified_phone_number=initial_unverified_number,
+ _verified_phone_number=initial_verified_number,
+ )
+ _, token = make_token_for_organization(organization)
+
+ client = APIClient()
+ url = reverse("api-internal:user-forget-number", kwargs={"pk": user.public_primary_key})
+ with patch(
+ "apps.twilioapp.phone_manager.PhoneManager.notify_about_changed_verified_phone_number", return_value=None
+ ):
+ response = client.put(url, None, format="json", **make_user_auth_headers(user, token))
+ assert response.status_code == expected_status
+
+ user_detail_url = reverse("api-internal:user-detail", kwargs={"pk": user.public_primary_key})
+ response = client.get(user_detail_url, None, format="json", **make_user_auth_headers(admin, token))
+ assert response.status_code == status.HTTP_200_OK
+ if expected_status == status.HTTP_200_OK:
+ assert not response.json()["unverified_phone_number"]
+ assert not response.json()["verified_phone_number"]
+ else:
+ assert response.json()["unverified_phone_number"] == initial_unverified_number
+ assert response.json()["verified_phone_number"] == initial_verified_number
+
+
+@pytest.mark.django_db
+@pytest.mark.parametrize(
+ "role,expected_status,initial_unverified_number,initial_verified_number",
+ [
+ (Role.ADMIN, status.HTTP_200_OK, "+1234567890", None),
+ (Role.EDITOR, status.HTTP_403_FORBIDDEN, "+1234567890", None),
+ (Role.VIEWER, status.HTTP_403_FORBIDDEN, "+1234567890", None),
+ (Role.ADMIN, status.HTTP_200_OK, None, "+1234567890"),
+ (Role.EDITOR, status.HTTP_403_FORBIDDEN, None, "+1234567890"),
+ (Role.VIEWER, status.HTTP_403_FORBIDDEN, None, "+1234567890"),
+ ],
+)
+def test_forget_other_number(
+ make_organization,
+ make_team,
+ make_user_for_organization,
+ make_token_for_organization,
+ make_user_auth_headers,
+ role,
+ expected_status,
+ initial_unverified_number,
+ initial_verified_number,
+):
+ organization = make_organization()
+ user = make_user_for_organization(
+ organization,
+ role=Role.ADMIN,
+ unverified_phone_number=initial_unverified_number,
+ _verified_phone_number=initial_verified_number,
+ )
+ other_user = make_user_for_organization(organization, role=role)
+ _, token = make_token_for_organization(organization)
+
+ client = APIClient()
+ url = reverse("api-internal:user-forget-number", kwargs={"pk": user.public_primary_key})
+ with patch(
+ "apps.twilioapp.phone_manager.PhoneManager.notify_about_changed_verified_phone_number", return_value=None
+ ):
+ response = client.put(url, None, format="json", **make_user_auth_headers(other_user, token))
+ assert response.status_code == expected_status
+
+ user_detail_url = reverse("api-internal:user-detail", kwargs={"pk": user.public_primary_key})
+ response = client.get(user_detail_url, None, format="json", **make_user_auth_headers(user, token))
+ assert response.status_code == status.HTTP_200_OK
+ if expected_status == status.HTTP_200_OK:
+ assert not response.json()["unverified_phone_number"]
+ assert not response.json()["verified_phone_number"]
+ else:
+ assert response.json()["unverified_phone_number"] == initial_unverified_number
+ assert response.json()["verified_phone_number"] == initial_verified_number
+
+
+@pytest.mark.django_db
+def test_viewer_cant_get_own_backend_verification_code(
+ make_organization, make_user_for_organization, make_token_for_organization, make_user_auth_headers
+):
+ organization = make_organization()
+ user = make_user_for_organization(organization, role=Role.VIEWER)
+ _, token = make_token_for_organization(organization)
+
+ client = APIClient()
+ url = (
+ reverse("api-internal:user-get-backend-verification-code", kwargs={"pk": user.public_primary_key})
+ + "?backend=TESTONLY"
+ )
+
+    response = client.get(url, format="json", **make_user_auth_headers(user, token))
+ assert response.status_code == status.HTTP_403_FORBIDDEN
+
+
+@pytest.mark.django_db
+def test_viewer_cant_get_another_user_backend_verification_code(
+ make_organization, make_user_for_organization, make_token_for_organization, make_user_auth_headers
+):
+ organization = make_organization()
+ first_user = make_user_for_organization(organization, role=Role.EDITOR)
+ second_user = make_user_for_organization(organization, role=Role.VIEWER)
+ _, token = make_token_for_organization(organization)
+
+ client = APIClient()
+ url = (
+ reverse("api-internal:user-get-backend-verification-code", kwargs={"pk": first_user.public_primary_key})
+ + "?backend=TESTONLY"
+ )
+
+ response = client.get(url, format="json", **make_user_auth_headers(second_user, token))
+ assert response.status_code == status.HTTP_403_FORBIDDEN
+
+
+@pytest.mark.django_db
+def test_viewer_cant_unlink_backend_own_user(
+ make_organization, make_user_for_organization, make_token_for_organization, make_user_auth_headers
+):
+ organization = make_organization()
+ user = make_user_for_organization(organization, role=Role.VIEWER)
+ _, token = make_token_for_organization(organization)
+
+ client = APIClient()
+ url = reverse("api-internal:user-unlink-backend", kwargs={"pk": user.public_primary_key}) + "?backend=TESTONLY"
+
+    response = client.post(url, format="json", **make_user_auth_headers(user, token))
+ assert response.status_code == status.HTTP_403_FORBIDDEN
+
+
+@pytest.mark.django_db
+def test_viewer_cant_unlink_backend_another_user(
+ make_organization, make_user_for_organization, make_token_for_organization, make_user_auth_headers
+):
+ organization = make_organization()
+ first_user = make_user_for_organization(organization, role=Role.EDITOR)
+ second_user = make_user_for_organization(organization, role=Role.VIEWER)
+ _, token = make_token_for_organization(organization)
+
+ client = APIClient()
+ url = (
+ reverse("api-internal:user-unlink-backend", kwargs={"pk": first_user.public_primary_key}) + "?backend=TESTONLY"
+ )
+
+ response = client.post(url, format="json", **make_user_auth_headers(second_user, token))
+ assert response.status_code == status.HTTP_403_FORBIDDEN
diff --git a/engine/apps/api/tests/test_user_groups.py b/engine/apps/api/tests/test_user_groups.py
new file mode 100644
index 0000000000..ce7494a142
--- /dev/null
+++ b/engine/apps/api/tests/test_user_groups.py
@@ -0,0 +1,70 @@
+import pytest
+from django.urls import reverse
+from rest_framework import status
+from rest_framework.test import APIClient
+
+from common.constants.role import Role
+
+
+@pytest.mark.django_db
+def test_usergroup_list(
+ make_slack_team_identity,
+ make_slack_user_group,
+ make_organization,
+ make_user_for_organization,
+ make_token_for_organization,
+ make_user_auth_headers,
+):
+ team_identity = make_slack_team_identity()
+ user_group = make_slack_user_group(
+ slack_team_identity=team_identity, name="Test User Group", handle="test-user-group"
+ )
+
+ organization = make_organization(slack_team_identity=team_identity)
+ _, token = make_token_for_organization(organization=organization)
+ user = make_user_for_organization(organization=organization)
+
+ client = APIClient()
+ url = reverse("api-internal:user_group-list")
+ response = client.get(url, format="json", **make_user_auth_headers(user, token))
+ expected_data = [{"id": user_group.public_primary_key, "name": "Test User Group", "handle": "test-user-group"}]
+
+ assert response.status_code == status.HTTP_200_OK
+ assert response.data == expected_data
+
+
+@pytest.mark.django_db
+def test_usergroup_list_without_slack_installed(
+ make_organization_and_user_with_plugin_token,
+ make_user_auth_headers,
+):
+ _, user, token = make_organization_and_user_with_plugin_token()
+
+ client = APIClient()
+ url = reverse("api-internal:user_group-list")
+ response = client.get(url, format="json", **make_user_auth_headers(user, token))
+
+ assert response.status_code == status.HTTP_200_OK
+ assert response.data == []
+
+
+@pytest.mark.django_db
+@pytest.mark.parametrize(
+ "role,expected_status",
+ [
+ (Role.ADMIN, status.HTTP_200_OK),
+ (Role.EDITOR, status.HTTP_200_OK),
+ (Role.VIEWER, status.HTTP_200_OK),
+ ],
+)
+def test_usergroup_permissions(
+ make_organization_and_user_with_plugin_token, make_user_auth_headers, role, expected_status
+):
+ _, user, token = make_organization_and_user_with_plugin_token(role)
+ client = APIClient()
+
+ url = reverse("api-internal:user_group-list")
+
+ response = client.get(url, format="json", **make_user_auth_headers(user, token))
+
+ assert response.status_code == expected_status
diff --git a/engine/apps/api/tests/test_user_notification_policy.py b/engine/apps/api/tests/test_user_notification_policy.py
new file mode 100644
index 0000000000..4760b47cf5
--- /dev/null
+++ b/engine/apps/api/tests/test_user_notification_policy.py
@@ -0,0 +1,471 @@
+import json
+
+import pytest
+from django.urls import reverse
+from django.utils import timezone
+from rest_framework import status
+from rest_framework.test import APIClient
+
+from apps.base.models import UserNotificationPolicy
+from common.constants.role import Role
+
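+# Channel these tests expect the API to fall back to when "notify_by" is omitted.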
+DEFAULT_NOTIFICATION_CHANNEL = UserNotificationPolicy.NotificationChannel.SLACK
+
+
+@pytest.fixture()
+def user_notification_policy_internal_api_setup(
+ make_organization_and_user_with_plugin_token, make_user_for_organization, make_user_notification_policy
+):
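+    """Create an admin with wait/notify/important notification steps and an
+    editor with a single step; return (token, steps, users)."""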
+ organization, admin, token = make_organization_and_user_with_plugin_token()
+ user = make_user_for_organization(organization, Role.EDITOR)
+
+ wait_notification_step = make_user_notification_policy(
+ admin, UserNotificationPolicy.Step.WAIT, wait_delay=timezone.timedelta(minutes=15), important=False
+ )
+ notify_notification_step = make_user_notification_policy(
+ admin,
+ UserNotificationPolicy.Step.NOTIFY,
+ notify_by=UserNotificationPolicy.NotificationChannel.SLACK,
+ important=False,
+ )
+
+ important_notification_step = make_user_notification_policy(
+ admin,
+ UserNotificationPolicy.Step.NOTIFY,
+ notify_by=UserNotificationPolicy.NotificationChannel.SLACK,
+ important=True,
+ )
+
+ second_user_step = make_user_notification_policy(
+ user,
+ UserNotificationPolicy.Step.NOTIFY,
+ notify_by=UserNotificationPolicy.NotificationChannel.SLACK,
+ important=False,
+ )
+ steps = wait_notification_step, notify_notification_step, important_notification_step, second_user_step
+ users = admin, user
+ return token, steps, users
+
+
+@pytest.mark.django_db
+def test_create_notification_policy(user_notification_policy_internal_api_setup, make_user_auth_headers):
+ token, steps, users = user_notification_policy_internal_api_setup
+ admin, _ = users
+ client = APIClient()
+ url = reverse("api-internal:notification_policy-list")
+
+ data = {
+ "step": UserNotificationPolicy.Step.NOTIFY,
+ "notify_by": UserNotificationPolicy.NotificationChannel.SLACK,
+ "wait_delay": None,
+ "important": False,
+ "user": admin.public_primary_key,
+ }
+ response = client.post(url, data, format="json", **make_user_auth_headers(admin, token))
+ assert response.status_code == status.HTTP_201_CREATED
+
+
+@pytest.mark.django_db
+def test_admin_can_create_notification_policy_for_user(
+ user_notification_policy_internal_api_setup, make_user_auth_headers
+):
+ token, steps, users = user_notification_policy_internal_api_setup
+ admin, user = users
+ client = APIClient()
+ url = reverse("api-internal:notification_policy-list")
+
+ data = {
+ "step": UserNotificationPolicy.Step.NOTIFY,
+ "notify_by": UserNotificationPolicy.NotificationChannel.SLACK,
+ "wait_delay": None,
+ "important": False,
+ "user": user.public_primary_key,
+ }
+ response = client.post(url, data, format="json", **make_user_auth_headers(admin, token))
+ assert response.status_code == status.HTTP_201_CREATED
+
+
+@pytest.mark.django_db
+def test_user_cant_create_notification_policy_for_user(
+ user_notification_policy_internal_api_setup,
+ make_user_auth_headers,
+):
+ token, _, users = user_notification_policy_internal_api_setup
+ admin, user = users
+
+ client = APIClient()
+ url = reverse("api-internal:notification_policy-list")
+
+ data = {
+ "step": UserNotificationPolicy.Step.NOTIFY,
+ "notify_by": UserNotificationPolicy.NotificationChannel.SLACK,
+ "wait_delay": None,
+ "important": False,
+ "user": admin.public_primary_key,
+ }
+ response = client.post(url, data, format="json", **make_user_auth_headers(user, token))
+ assert response.status_code == status.HTTP_403_FORBIDDEN
+
+
+@pytest.mark.django_db
+def test_create_notification_policy_from_step(
+ user_notification_policy_internal_api_setup,
+ make_user_auth_headers,
+):
+ token, steps, users = user_notification_policy_internal_api_setup
+ wait_notification_step, _, _, _ = steps
+ admin, _ = users
+ client = APIClient()
+ url = reverse("api-internal:notification_policy-list")
+
+ data = {
+ "prev_step": wait_notification_step.public_primary_key,
+ "step": UserNotificationPolicy.Step.NOTIFY,
+ "notify_by": UserNotificationPolicy.NotificationChannel.SLACK,
+ "wait_delay": None,
+ "important": False,
+ "user": admin.public_primary_key,
+ }
+ response = client.post(url, data, format="json", **make_user_auth_headers(admin, token))
+ assert response.status_code == status.HTTP_201_CREATED
+ assert response.data["order"] == 1
+
+
+@pytest.mark.django_db
+def test_create_invalid_notification_policy(user_notification_policy_internal_api_setup, make_user_auth_headers):
+ token, steps, users = user_notification_policy_internal_api_setup
+ wait_notification_step, _, _, _ = steps
+ admin, _ = users
+ client = APIClient()
+ url = reverse("api-internal:notification_policy-list")
+
+ data = {
+ "prev_step": wait_notification_step.public_primary_key,
+ "step": UserNotificationPolicy.Step.NOTIFY,
+ "notify_by": UserNotificationPolicy.NotificationChannel.SLACK,
+ "wait_delay": None,
+ "important": True,
+ "user": admin.public_primary_key,
+ }
+ response = client.post(url, data, format="json", **make_user_auth_headers(admin, token))
+ assert response.status_code == status.HTTP_400_BAD_REQUEST
+
+
+@pytest.mark.django_db
+def test_update_step(user_notification_policy_internal_api_setup, make_user_auth_headers):
+ token, steps, users = user_notification_policy_internal_api_setup
+ admin, _ = users
+
+ _, notify_notification_step, _, _ = steps
+ client = APIClient()
+ url = reverse("api-internal:notification_policy-detail", kwargs={"pk": notify_notification_step.public_primary_key})
+
+ response = client.patch(
+ url,
+ data=json.dumps({"notify_by": UserNotificationPolicy.NotificationChannel.PHONE_CALL}),
+ content_type="application/json",
+ **make_user_auth_headers(admin, token),
+ )
+ assert response.status_code == status.HTTP_200_OK
+ assert response.data["notify_by"] == UserNotificationPolicy.NotificationChannel.PHONE_CALL
+
+
+@pytest.mark.django_db
+def test_admin_can_update_user_step(user_notification_policy_internal_api_setup, make_user_auth_headers):
+ token, steps, users = user_notification_policy_internal_api_setup
+ admin, _ = users
+ _, _, _, second_user_step = steps
+ client = APIClient()
+ url = reverse("api-internal:notification_policy-detail", kwargs={"pk": second_user_step.public_primary_key})
+
+ response = client.patch(
+ url,
+ data=json.dumps({"notify_by": UserNotificationPolicy.NotificationChannel.PHONE_CALL}),
+ content_type="application/json",
+ **make_user_auth_headers(admin, token),
+ )
+ assert response.status_code == status.HTTP_200_OK
+ assert response.data["notify_by"] == UserNotificationPolicy.NotificationChannel.PHONE_CALL
+
+
+@pytest.mark.django_db
+def test_user_cant_update_admin_step(
+ user_notification_policy_internal_api_setup,
+ make_user_auth_headers,
+):
+ token, steps, users = user_notification_policy_internal_api_setup
+ _, user = users
+
+ admin_step, _, _, _ = steps
+ client = APIClient()
+ url = reverse("api-internal:notification_policy-detail", kwargs={"pk": admin_step.public_primary_key})
+
+ response = client.patch(
+ url,
+ data=json.dumps({"notify_by": UserNotificationPolicy.NotificationChannel.PHONE_CALL}),
+ content_type="application/json",
+ **make_user_auth_headers(user, token),
+ )
+ assert response.status_code == status.HTTP_403_FORBIDDEN
+
+
+@pytest.mark.django_db
+def test_admin_can_move_user_step(user_notification_policy_internal_api_setup, make_user_auth_headers):
+ token, steps, users = user_notification_policy_internal_api_setup
+ admin, _ = users
+ _, _, _, second_user_step = steps
+ client = APIClient()
+ url = reverse(
+ "api-internal:notification_policy-move-to-position", kwargs={"pk": second_user_step.public_primary_key}
+ )
+
+ response = client.put(f"{url}?position=1", content_type="application/json", **make_user_auth_headers(admin, token))
+ assert response.status_code == status.HTTP_200_OK
+
+
+@pytest.mark.django_db
+def test_user_cant_move_admin_step(user_notification_policy_internal_api_setup, make_user_auth_headers):
+ token, steps, users = user_notification_policy_internal_api_setup
+ _, user = users
+
+ admin_step, _, _, _ = steps
+ client = APIClient()
+ url = reverse("api-internal:notification_policy-move-to-position", kwargs={"pk": admin_step.public_primary_key})
+
+ response = client.put(f"{url}?position=1", content_type="application/json", **make_user_auth_headers(user, token))
+ assert response.status_code == status.HTTP_403_FORBIDDEN
+
+
+@pytest.mark.django_db
+def test_unable_to_change_importance(user_notification_policy_internal_api_setup, make_user_auth_headers):
+ token, steps, users = user_notification_policy_internal_api_setup
+ admin, _ = users
+ _, notify_notification_step, _, _ = steps
+ client = APIClient()
+ url = reverse("api-internal:notification_policy-detail", kwargs={"pk": notify_notification_step.public_primary_key})
+
+ data = {
+ "important": True,
+ }
+ response = client.patch(
+ url, data=json.dumps(data), content_type="application/json", **make_user_auth_headers(admin, token)
+ )
+ notify_notification_step.refresh_from_db()
+
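+    # The PATCH succeeds, but "important" is expected to stay unchanged:
+    # importance cannot be flipped on an existing policy via this endpoint.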
+ assert response.status_code == status.HTTP_200_OK
+ assert notify_notification_step.important != data["important"]
+
+
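+# Switching a step to WAIT without an explicit wait_delay is expected to fall
+# back to a 300-second default.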
+@pytest.mark.django_db
+@pytest.mark.parametrize("wait_delay, expected_wait_delay", [(None, "300.0"), ("900.0", "900.0")])
+def test_switch_step_type_from_notify_to_wait(
+ make_organization_and_user_with_plugin_token,
+ make_user_auth_headers,
+ make_user_notification_policy,
+ wait_delay,
+ expected_wait_delay,
+):
+ organization, admin, token = make_organization_and_user_with_plugin_token()
+
+ notify_notification_step = make_user_notification_policy(
+ admin,
+ UserNotificationPolicy.Step.NOTIFY,
+ notify_by=UserNotificationPolicy.NotificationChannel.SLACK,
+ important=False,
+ )
+ client = APIClient()
+ url = reverse("api-internal:notification_policy-detail", kwargs={"pk": notify_notification_step.public_primary_key})
+
+ data = {
+ "id": notify_notification_step.public_primary_key,
+ "important": False,
+ "notify_by": None,
+ "order": 0,
+ "user": admin.public_primary_key,
+ "step": UserNotificationPolicy.Step.WAIT,
+ "wait_delay": wait_delay,
+ }
+
+ expected_response = {
+ "id": notify_notification_step.public_primary_key,
+ "important": False,
+ "notify_by": None,
+ "order": 0,
+ "user": admin.public_primary_key,
+ "step": UserNotificationPolicy.Step.WAIT,
+ "wait_delay": expected_wait_delay,
+ }
+ response = client.put(
+ url, data=json.dumps(data), content_type="application/json", **make_user_auth_headers(admin, token)
+ )
+ notify_notification_step.refresh_from_db()
+
+ assert response.status_code == status.HTTP_200_OK
+ assert response.json() == expected_response
+
+
+@pytest.mark.django_db
+@pytest.mark.parametrize(
+ "notification_channel, expected_notification_channel",
+ [
+ (None, DEFAULT_NOTIFICATION_CHANNEL),
+ (
+ UserNotificationPolicy.NotificationChannel.PHONE_CALL,
+ UserNotificationPolicy.NotificationChannel.PHONE_CALL,
+ ),
+ ],
+)
+def test_switch_step_type_wait_to_notify(
+ make_organization_and_user_with_plugin_token,
+ make_user_auth_headers,
+ make_user_notification_policy,
+ notification_channel,
+ expected_notification_channel,
+):
+ organization, admin, token = make_organization_and_user_with_plugin_token()
+
+ wait_notification_step = make_user_notification_policy(
+ admin,
+ UserNotificationPolicy.Step.WAIT,
+ important=False,
+ )
+
+ client = APIClient()
+ url = reverse("api-internal:notification_policy-detail", kwargs={"pk": wait_notification_step.public_primary_key})
+
+ data = {
+ "id": wait_notification_step.public_primary_key,
+ "important": False,
+ "notify_by": notification_channel,
+ "order": 0,
+ "user": admin.public_primary_key,
+ "step": UserNotificationPolicy.Step.NOTIFY,
+ "wait_delay": None,
+ }
+
+ expected_response = {
+ "id": wait_notification_step.public_primary_key,
+ "important": False,
+ "notify_by": expected_notification_channel,
+ "order": 0,
+ "user": admin.public_primary_key,
+ "step": UserNotificationPolicy.Step.NOTIFY,
+ "wait_delay": None,
+ }
+ response = client.put(
+ url, data=json.dumps(data), content_type="application/json", **make_user_auth_headers(admin, token)
+ )
+
+ assert response.status_code == status.HTTP_200_OK
+ assert response.json() == expected_response
+
+
+@pytest.mark.django_db
+def test_switch_notification_channel(
+ make_organization_and_user_with_plugin_token,
+ make_user_auth_headers,
+ make_user_notification_policy,
+):
+ organization, admin, token = make_organization_and_user_with_plugin_token()
+
+ notify_notification_step = make_user_notification_policy(
+ admin,
+ UserNotificationPolicy.Step.NOTIFY,
+ notify_by=UserNotificationPolicy.NotificationChannel.SLACK,
+ important=False,
+ )
+ client = APIClient()
+ url = reverse("api-internal:notification_policy-detail", kwargs={"pk": notify_notification_step.public_primary_key})
+
+ data = {
+ "id": notify_notification_step.public_primary_key,
+ "important": False,
+ "notify_by": UserNotificationPolicy.NotificationChannel.PHONE_CALL,
+ "order": 0,
+ "user": admin.public_primary_key,
+ "step": UserNotificationPolicy.Step.NOTIFY,
+ "wait_delay": None,
+ }
+
+ expected_response = {
+ "id": notify_notification_step.public_primary_key,
+ "important": False,
+ "notify_by": UserNotificationPolicy.NotificationChannel.PHONE_CALL,
+ "order": 0,
+ "user": admin.public_primary_key,
+ "step": UserNotificationPolicy.Step.NOTIFY,
+ "wait_delay": None,
+ }
+ response = client.put(
+ url, data=json.dumps(data), content_type="application/json", **make_user_auth_headers(admin, token)
+ )
+
+ assert response.status_code == status.HTTP_200_OK
+ assert response.json() == expected_response
+
+
+@pytest.mark.django_db
+@pytest.mark.parametrize(
+ "from_wait_delay, to_wait_delay", [(None, "300.0"), (timezone.timedelta(seconds=900), "900.0")]
+)
+def test_switch_wait_delay(
+ make_organization_and_user_with_plugin_token,
+ make_user_auth_headers,
+ make_user_notification_policy,
+ from_wait_delay,
+ to_wait_delay,
+):
+ organization, admin, token = make_organization_and_user_with_plugin_token()
+ wait_notification_step = make_user_notification_policy(
+ admin, UserNotificationPolicy.Step.WAIT, wait_delay=from_wait_delay, important=False
+ )
+ client = APIClient()
+ url = reverse("api-internal:notification_policy-detail", kwargs={"pk": wait_notification_step.public_primary_key})
+
+ data = {
+ "id": wait_notification_step.public_primary_key,
+ "important": False,
+ "notify_by": DEFAULT_NOTIFICATION_CHANNEL,
+ "order": 0,
+ "user": admin.public_primary_key,
+ "step": UserNotificationPolicy.Step.NOTIFY,
+ "wait_delay": to_wait_delay,
+ }
+
+ expected_response = {
+ "id": wait_notification_step.public_primary_key,
+ "important": False,
+ "notify_by": DEFAULT_NOTIFICATION_CHANNEL,
+ "order": 0,
+ "user": admin.public_primary_key,
+ "step": UserNotificationPolicy.Step.NOTIFY,
+ "wait_delay": to_wait_delay,
+ }
+ response = client.put(
+ url, data=json.dumps(data), content_type="application/json", **make_user_auth_headers(admin, token)
+ )
+
+ assert response.status_code == status.HTTP_200_OK
+ assert response.json() == expected_response
+
+
+@pytest.mark.django_db
+@pytest.mark.parametrize("feature_flag_enabled", [False, True])
+def test_notification_policy_backends_enabled(
+ user_notification_policy_internal_api_setup, settings, make_user_auth_headers, feature_flag_enabled
+):
+ token, _, users = user_notification_policy_internal_api_setup
+ admin, _ = users
+
+ settings.FEATURE_EXTRA_MESSAGING_BACKENDS_ENABLED = feature_flag_enabled
+
+ client = APIClient()
+ url = reverse("api-internal:notification_policy-notify-by-options")
+
+ response = client.get(url, **make_user_auth_headers(admin, token))
+ assert response.status_code == status.HTTP_200_OK
+ options = [opt["display_name"] for opt in response.json()]
+ if feature_flag_enabled:
+ assert "Test Only Backend" in options
+ else:
+ assert "Test Only Backend" not in options
diff --git a/engine/apps/api/tests/test_user_schedule_export.py b/engine/apps/api/tests/test_user_schedule_export.py
new file mode 100644
index 0000000000..a465a93489
--- /dev/null
+++ b/engine/apps/api/tests/test_user_schedule_export.py
@@ -0,0 +1,230 @@
+import pytest
+from django.urls import reverse
+from rest_framework import status
+from rest_framework.test import APIClient
+
+from apps.auth_token.models import UserScheduleExportAuthToken
+from common.constants.role import Role
+
+ICAL_URL = "https://calendar.google.com/calendar/ical/amixr.io_37gttuakhrtr75ano72p69rt78%40group.calendar.google.com/private-1d00a680ba5be7426c3eb3ef1616e26d/basic.ics" # noqa
+
+
+@pytest.mark.django_db
+@pytest.mark.parametrize(
+ "role,expected_status",
+ [
+ (Role.ADMIN, status.HTTP_200_OK),
+ (Role.EDITOR, status.HTTP_200_OK),
+ (Role.VIEWER, status.HTTP_403_FORBIDDEN),
+ ],
+)
+def test_get_user_schedule_export_token(
+ make_organization_and_user_with_plugin_token,
+ make_user_auth_headers,
+ role,
+ expected_status,
+):
+
+ organization, user, token = make_organization_and_user_with_plugin_token(role=role)
+
+ UserScheduleExportAuthToken.create_auth_token(
+ user=user,
+ organization=organization,
+ )
+
+ client = APIClient()
+
+ url = reverse("api-internal:user-export-token", kwargs={"pk": user.public_primary_key})
+
+ response = client.get(url, format="json", **make_user_auth_headers(user, token))
+
+ assert response.status_code == expected_status
+
+
+@pytest.mark.django_db
+@pytest.mark.parametrize(
+ "role,expected_status",
+ [
+ (Role.ADMIN, status.HTTP_404_NOT_FOUND),
+ (Role.EDITOR, status.HTTP_404_NOT_FOUND),
+ (Role.VIEWER, status.HTTP_403_FORBIDDEN),
+ ],
+)
+def test_user_schedule_export_token_not_found(
+ make_organization_and_user_with_plugin_token,
+ make_user_auth_headers,
+ role,
+ expected_status,
+):
+
+ _, user, token = make_organization_and_user_with_plugin_token(role=role)
+
+ url = reverse("api-internal:user-export-token", kwargs={"pk": user.public_primary_key})
+
+ client = APIClient()
+
+ response = client.get(url, format="json", **make_user_auth_headers(user, token))
+
+ assert response.status_code == expected_status
+
+
+@pytest.mark.django_db
+@pytest.mark.parametrize(
+ "role,expected_status",
+ [
+ (Role.ADMIN, status.HTTP_201_CREATED),
+ (Role.EDITOR, status.HTTP_201_CREATED),
+ (Role.VIEWER, status.HTTP_403_FORBIDDEN),
+ ],
+)
+def test_user_schedule_create_export_token(
+ make_organization_and_user_with_plugin_token,
+ make_user_auth_headers,
+ role,
+ expected_status,
+):
+
+ _, user, token = make_organization_and_user_with_plugin_token(role=role)
+
+ url = reverse("api-internal:user-export-token", kwargs={"pk": user.public_primary_key})
+
+ client = APIClient()
+
+ response = client.post(url, format="json", **make_user_auth_headers(user, token))
+
+    assert response.status_code == expected_status
+
+
+@pytest.mark.django_db
+@pytest.mark.parametrize(
+ "role,expected_status",
+ [
+ (Role.ADMIN, status.HTTP_409_CONFLICT),
+ (Role.EDITOR, status.HTTP_409_CONFLICT),
+ (Role.VIEWER, status.HTTP_403_FORBIDDEN),
+ ],
+)
+def test_user_schedule_create_multiple_export_tokens_fails(
+ make_organization_and_user_with_plugin_token,
+ make_user_auth_headers,
+ role,
+ expected_status,
+):
+
+ organization, user, token = make_organization_and_user_with_plugin_token(role=role)
+
+ UserScheduleExportAuthToken.create_auth_token(
+ user=user,
+ organization=organization,
+ )
+
+ url = reverse("api-internal:user-export-token", kwargs={"pk": user.public_primary_key})
+
+ client = APIClient()
+
+ response = client.post(url, format="json", **make_user_auth_headers(user, token))
+
+    assert response.status_code == expected_status
+
+
+@pytest.mark.django_db
+@pytest.mark.parametrize(
+ "role,expected_status",
+ [
+ (Role.ADMIN, status.HTTP_204_NO_CONTENT),
+ (Role.EDITOR, status.HTTP_204_NO_CONTENT),
+ (Role.VIEWER, status.HTTP_403_FORBIDDEN),
+ ],
+)
+def test_user_schedule_delete_export_token(
+ make_organization_and_user_with_plugin_token,
+ make_user_auth_headers,
+ role,
+ expected_status,
+):
+
+ organization, user, token = make_organization_and_user_with_plugin_token(role=role)
+
+ instance, _ = UserScheduleExportAuthToken.create_auth_token(
+ user=user,
+ organization=organization,
+ )
+
+ url = reverse("api-internal:user-export-token", kwargs={"pk": user.public_primary_key})
+
+ client = APIClient()
+
+ response = client.delete(url, format="json", **make_user_auth_headers(user, token))
+
+    assert response.status_code == expected_status
+
+    if response.status_code != status.HTTP_403_FORBIDDEN:
+        # A successful delete must remove the token row.
+        check_token = UserScheduleExportAuthToken.objects.filter(id=instance.id)
+        assert not check_token.exists()
+
+
+@pytest.mark.django_db
+@pytest.mark.parametrize(
+ "role,expected_status",
+ [
+ (Role.ADMIN, status.HTTP_404_NOT_FOUND),
+ (Role.EDITOR, status.HTTP_404_NOT_FOUND),
+ (Role.VIEWER, status.HTTP_403_FORBIDDEN),
+ ],
+)
+def test_user_cannot_get_another_users_schedule_token(
+ make_organization_and_user_with_plugin_token,
+ make_user_auth_headers,
+ role,
+ expected_status,
+):
+
+ organization1, user1, _ = make_organization_and_user_with_plugin_token(role=role)
+ _, user2, token2 = make_organization_and_user_with_plugin_token(role=role)
+
+ UserScheduleExportAuthToken.create_auth_token(
+ user=user1,
+ organization=organization1,
+ )
+
+ url = reverse("api-internal:user-export-token", kwargs={"pk": user1.public_primary_key})
+
+ client = APIClient()
+
+ response = client.get(url, format="json", **make_user_auth_headers(user2, token2))
+
+ assert response.status_code == expected_status
+
+
+@pytest.mark.django_db
+@pytest.mark.parametrize(
+ "role,expected_status",
+ [
+ (Role.ADMIN, status.HTTP_404_NOT_FOUND),
+ (Role.EDITOR, status.HTTP_404_NOT_FOUND),
+ (Role.VIEWER, status.HTTP_403_FORBIDDEN),
+ ],
+)
+def test_user_cannot_delete_another_users_schedule_token(
+ make_organization_and_user_with_plugin_token,
+ make_user_auth_headers,
+ role,
+ expected_status,
+):
+
+ organization1, user1, _ = make_organization_and_user_with_plugin_token(role=role)
+ _, user2, token2 = make_organization_and_user_with_plugin_token(role=role)
+
+ UserScheduleExportAuthToken.create_auth_token(
+ user=user1,
+ organization=organization1,
+ )
+
+ url = reverse("api-internal:user-export-token", kwargs={"pk": user1.public_primary_key})
+
+ client = APIClient()
+
+ response = client.delete(url, format="json", **make_user_auth_headers(user2, token2))
+
+ assert response.status_code == expected_status
diff --git a/engine/apps/api/throttlers/__init__.py b/engine/apps/api/throttlers/__init__.py
new file mode 100644
index 0000000000..3d04dedf00
--- /dev/null
+++ b/engine/apps/api/throttlers/__init__.py
@@ -0,0 +1 @@
+from .demo_alert_throttler import DemoAlertThrottler # noqa: F401
diff --git a/engine/apps/api/throttlers/demo_alert_throttler.py b/engine/apps/api/throttlers/demo_alert_throttler.py
new file mode 100644
index 0000000000..e28cd8f8a7
--- /dev/null
+++ b/engine/apps/api/throttlers/demo_alert_throttler.py
@@ -0,0 +1,6 @@
+from rest_framework.throttling import UserRateThrottle
+
+
+class DemoAlertThrottler(UserRateThrottle):
+ scope = "send_demo_alert"
+ rate = "30/m"
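+
+
+# Illustrative wiring (assumption, not shown in this patch): a view opts in via
+# `throttle_classes = [DemoAlertThrottler]`; DRF then allows each user at most
+# 30 demo-alert requests per minute under the "send_demo_alert" scope.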
diff --git a/engine/apps/api/urls.py b/engine/apps/api/urls.py
new file mode 100644
index 0000000000..a82ee4e985
--- /dev/null
+++ b/engine/apps/api/urls.py
@@ -0,0 +1,118 @@
+from django.conf import settings
+from django.urls import include, path
+
+from common.api_helpers.optional_slash_router import OptionalSlashRouter, optional_slash_path
+
+from .views import UserNotificationPolicyView, auth
+from .views.alert_group import AlertGroupView
+from .views.alert_receive_channel import AlertReceiveChannelView
+from .views.alert_receive_channel_template import AlertReceiveChannelTemplateView
+from .views.apns_device import APNSDeviceAuthorizedViewSet
+from .views.channel_filter import ChannelFilterView
+from .views.custom_button import CustomButtonView
+from .views.escalation_chain import EscalationChainViewSet
+from .views.escalation_policy import EscalationPolicyView
+from .views.features import FeaturesAPIView
+from .views.gitops import TerraformGitOpsView, TerraformStateView
+from .views.integration_heartbeat import IntegrationHeartBeatView
+from .views.live_setting import LiveSettingViewSet
+from .views.maintenance import MaintenanceAPIView, MaintenanceStartAPIView, MaintenanceStopAPIView
+from .views.organization import (
+ CurrentOrganizationView,
+ GetChannelVerificationCode,
+ GetTelegramVerificationCode,
+ SetGeneralChannel,
+)
+from .views.organization_log_record import OrganizationLogRecordView
+from .views.preview_template_options import PreviewTemplateOptionsView
+from .views.public_api_tokens import PublicApiTokenView
+from .views.resolution_note import ResolutionNoteView
+from .views.route_regex_debugger import RouteRegexDebuggerView
+from .views.schedule import ScheduleView
+from .views.slack_channel import SlackChannelView
+from .views.slack_team_settings import (
+ AcknowledgeReminderOptionsAPIView,
+ SlackTeamSettingsAPIView,
+ UnAcknowledgeTimeoutOptionsAPIView,
+)
+from .views.subscription import SubscriptionView
+from .views.team import TeamViewSet
+from .views.telegram_channels import TelegramChannelViewSet
+from .views.user import CurrentUserView, UserView
+from .views.user_group import UserGroupViewSet
+
+app_name = "api-internal"
+
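+# Routes registered via OptionalSlashRouter and optional_slash_path resolve
+# with or without a trailing slash (see common.api_helpers.optional_slash_router).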
+router = OptionalSlashRouter()
+router.register(r"users", UserView, basename="user")
+router.register(r"teams", TeamViewSet, basename="team")
+router.register(r"alertgroups", AlertGroupView, basename="alertgroup")
+router.register(r"notification_policies", UserNotificationPolicyView, basename="notification_policy")
+router.register(r"escalation_policies", EscalationPolicyView, basename="escalation_policy")
+router.register(r"escalation_chains", EscalationChainViewSet, basename="escalation_chain")
+router.register(r"alert_receive_channels", AlertReceiveChannelView, basename="alert_receive_channel")
+router.register(
+ r"alert_receive_channel_templates", AlertReceiveChannelTemplateView, basename="alert_receive_channel_template"
+)
+router.register(r"channel_filters", ChannelFilterView, basename="channel_filter")
+router.register(r"schedules", ScheduleView, basename="schedule")
+router.register(r"custom_buttons", CustomButtonView, basename="custom_button")
+router.register(r"resolution_notes", ResolutionNoteView, basename="resolution_note")
+router.register(r"telegram_channels", TelegramChannelViewSet, basename="telegram_channel")
+router.register(r"slack_channels", SlackChannelView, basename="slack_channel")
+router.register(r"user_groups", UserGroupViewSet, basename="user_group")
+router.register(r"heartbeats", IntegrationHeartBeatView, basename="integration_heartbeat")
+router.register(r"organization_logs", OrganizationLogRecordView, basename="organization_log")
+router.register(r"tokens", PublicApiTokenView, basename="api_token")
+router.register(r"live_settings", LiveSettingViewSet, basename="live_settings")
+
+if settings.MOBILE_APP_PUSH_NOTIFICATIONS_ENABLED:
+ router.register(r"device/apns", APNSDeviceAuthorizedViewSet)
+
+urlpatterns = [
+ path("", include(router.urls)),
+ optional_slash_path("user", CurrentUserView.as_view(), name="api-user"),
+ optional_slash_path("set_general_channel", SetGeneralChannel.as_view(), name="api-set-general-log-channel"),
+ optional_slash_path("current_team", CurrentOrganizationView.as_view(), name="api-current-team"),
+ optional_slash_path(
+ "current_team/get_telegram_verification_code",
+ GetTelegramVerificationCode.as_view(),
+ name="api-get-telegram-verification-code",
+ ),
+ optional_slash_path(
+ "current_team/get_channel_verification_code",
+ GetChannelVerificationCode.as_view(),
+ name="api-get-channel-verification-code",
+ ),
+ optional_slash_path("current_subscription", SubscriptionView.as_view(), name="subscription"),
+ optional_slash_path("terraform_file", TerraformGitOpsView.as_view(), name="terraform_file"),
+ optional_slash_path("terraform_imports", TerraformStateView.as_view(), name="terraform_imports"),
+ optional_slash_path("maintenance", MaintenanceAPIView.as_view(), name="maintenance"),
+ optional_slash_path("start_maintenance", MaintenanceStartAPIView.as_view(), name="start_maintenance"),
+ optional_slash_path("stop_maintenance", MaintenanceStopAPIView.as_view(), name="stop_maintenance"),
+ optional_slash_path("slack_settings", SlackTeamSettingsAPIView.as_view(), name="slack-settings"),
+ optional_slash_path(
+ "slack_settings/acknowledge_remind_options",
+ AcknowledgeReminderOptionsAPIView.as_view(),
+ name="acknowledge-reminder-options",
+ ),
+ optional_slash_path(
+ "slack_settings/unacknowledge_timeout_options",
+ UnAcknowledgeTimeoutOptionsAPIView.as_view(),
+ name="unacknowledge-timeout-options",
+ ),
+ optional_slash_path("features", FeaturesAPIView.as_view(), name="features"),
+ optional_slash_path(
+ "preview_template_options", PreviewTemplateOptionsView.as_view(), name="preview_template_options"
+ ),
+ optional_slash_path("route_regex_debugger", RouteRegexDebuggerView.as_view(), name="route_regex_debugger"),
+]
+
+urlpatterns += [
+    # The frontend requests these URLs without a trailing slash; extra patterns are registered here to avoid 301 redirects.
+ path(r"login/", auth.overridden_login_slack_auth, name="slack-auth-with-no-slash"),
+ path(r"login//", auth.overridden_login_slack_auth, name="slack-auth"),
+ path(r"complete//", auth.overridden_complete_slack_auth, name="complete-slack-auth"),
+]
+
+urlpatterns += router.urls
diff --git a/engine/apps/api/views/__init__.py b/engine/apps/api/views/__init__.py
new file mode 100644
index 0000000000..c46a5aea00
--- /dev/null
+++ b/engine/apps/api/views/__init__.py
@@ -0,0 +1 @@
+from .user_notification_policy import UserNotificationPolicyView # noqa: F401
diff --git a/engine/apps/api/views/alert_group.py b/engine/apps/api/views/alert_group.py
new file mode 100644
index 0000000000..5ea7e93b78
--- /dev/null
+++ b/engine/apps/api/views/alert_group.py
@@ -0,0 +1,571 @@
+from datetime import datetime, timedelta
+
+from django import forms
+from django.db import models
+from django.db.models import CharField, Q
+from django.db.models.constants import LOOKUP_SEP
+from django.db.models.functions import Cast
+from django.utils import timezone
+from django_filters import rest_framework as filters
+from django_filters.widgets import RangeWidget
+from rest_framework import mixins, status, viewsets
+from rest_framework.decorators import action
+from rest_framework.filters import SearchFilter
+from rest_framework.permissions import IsAuthenticated
+from rest_framework.response import Response
+
+from apps.alerts.constants import ActionSource
+from apps.alerts.models import AlertGroup, AlertReceiveChannel
+from apps.alerts.tasks import invalidate_web_cache_for_alert_group
+from apps.api.permissions import MODIFY_ACTIONS, READ_ACTIONS, ActionPermission, AnyRole, IsAdminOrEditor
+from apps.api.serializers.alert_group import AlertGroupSerializer
+from apps.auth_token.auth import MobileAppAuthTokenAuthentication, PluginAuthentication
+from apps.user_management.models import User
+from common.api_helpers.exceptions import BadRequest
+from common.api_helpers.filters import DateRangeFilterMixin, ModelFieldFilterMixin
+from common.api_helpers.mixins import PreviewTemplateMixin, PublicPrimaryKeyMixin
+from common.api_helpers.paginators import FiftyPageSizePaginator
+
+
+def get_integration_queryset(request):
+ if request is None:
+ return AlertReceiveChannel.objects.none()
+
+ return AlertReceiveChannel.objects_with_maintenance.filter(organization=request.user.organization)
+
+
+def get_user_queryset(request):
+ if request is None:
+ return User.objects.none()
+
+ return User.objects.filter(organization=request.user.organization).distinct()
+
+
+class AlertGroupFilter(DateRangeFilterMixin, ModelFieldFilterMixin, filters.FilterSet):
+ """
+    Examples of possible date formats are listed at https://docs.djangoproject.com/en/1.9/ref/settings/#datetime-input-formats
+ """
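+
+    # Illustrative query (URL prefix assumed): ?resolved=true&is_root=true
+    # matches resolved root groups (is_root tests root_alert_group__isnull).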
+
+ started_at_gte = filters.DateTimeFilter(field_name="started_at", lookup_expr="gte")
+ started_at_lte = filters.DateTimeFilter(field_name="started_at", lookup_expr="lte")
+ resolved_at_lte = filters.DateTimeFilter(field_name="resolved_at", lookup_expr="lte")
+ is_root = filters.BooleanFilter(field_name="root_alert_group", lookup_expr="isnull")
+ id__in = filters.BaseInFilter(field_name="public_primary_key", lookup_expr="in")
+ status = filters.MultipleChoiceFilter(choices=AlertGroup.STATUS_CHOICES, method="filter_status")
+ started_at = filters.CharFilter(field_name="started_at", method=DateRangeFilterMixin.filter_date_range.__name__)
+ resolved_at = filters.CharFilter(field_name="resolved_at", method=DateRangeFilterMixin.filter_date_range.__name__)
+ silenced_at = filters.CharFilter(field_name="silenced_at", method=DateRangeFilterMixin.filter_date_range.__name__)
+ silenced_by = filters.ModelMultipleChoiceFilter(
+ field_name="silenced_by_user",
+ queryset=get_user_queryset,
+ to_field_name="public_primary_key",
+ method=ModelFieldFilterMixin.filter_model_field.__name__,
+ )
+ integration = filters.ModelMultipleChoiceFilter(
+ field_name="channel_filter__alert_receive_channel",
+ queryset=get_integration_queryset,
+ to_field_name="public_primary_key",
+ method=ModelFieldFilterMixin.filter_model_field.__name__,
+ )
+ started_at_range = filters.DateFromToRangeFilter(
+ field_name="started_at", widget=RangeWidget(attrs={"type": "date"})
+ )
+ resolved_by = filters.ModelMultipleChoiceFilter(
+ field_name="resolved_by_user",
+ queryset=get_user_queryset,
+ to_field_name="public_primary_key",
+ method=ModelFieldFilterMixin.filter_model_field.__name__,
+ )
+ acknowledged_by = filters.ModelMultipleChoiceFilter(
+ field_name="acknowledged_by_user",
+ queryset=get_user_queryset,
+ to_field_name="public_primary_key",
+ method=ModelFieldFilterMixin.filter_model_field.__name__,
+ )
+ invitees_are = filters.ModelMultipleChoiceFilter(
+ queryset=get_user_queryset, to_field_name="public_primary_key", method="filter_invitees_are"
+ )
+ with_resolution_note = filters.BooleanFilter(method="filter_with_resolution_note")
+
+ class Meta:
+ model = AlertGroup
+ fields = [
+ "id__in",
+ "resolved",
+ "acknowledged",
+ "started_at_gte",
+ "started_at_lte",
+ "resolved_at_lte",
+ "is_root",
+ "resolved_by",
+ "acknowledged_by",
+ ]
+
+ def filter_status(self, queryset, name, value):
+ if not value:
+ return queryset
+ try:
+ statuses = list(map(int, value))
+ except ValueError:
+ raise BadRequest(detail="Invalid status value")
+
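+        # e.g. ?status=0&status=1 combines the "new" and "silenced" predicates with OR
+        # (the concrete integer values of AlertGroup.NEW etc. are assumptions for illustration).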
+        status_filters = {}
+        q_objects = Q()
+
+        if AlertGroup.NEW in statuses:
+            status_filters["new"] = Q(silenced=False) & Q(acknowledged=False) & Q(resolved=False)
+        if AlertGroup.SILENCED in statuses:
+            status_filters["silenced"] = Q(silenced=True) & Q(acknowledged=False) & Q(resolved=False)
+        if AlertGroup.ACKNOWLEDGED in statuses:
+            status_filters["acknowledged"] = Q(acknowledged=True) & Q(resolved=False)
+        if AlertGroup.RESOLVED in statuses:
+            status_filters["resolved"] = Q(resolved=True)
+
+        for status_q in status_filters.values():
+            q_objects |= status_q
+
+ queryset = queryset.filter(q_objects)
+
+ return queryset
+
+ def filter_invitees_are(self, queryset, name, value):
+ users = value
+
+ if not users:
+ return queryset
+
+ queryset = queryset.filter(acknowledged=False, resolved=False, log_records__author__in=users).distinct()
+
+ return queryset
+
+ def filter_with_resolution_note(self, queryset, name, value):
+ if value is True:
+ queryset = queryset.filter(Q(resolution_notes__isnull=False, resolution_notes__deleted_at=None)).distinct()
+ elif value is False:
+ queryset = queryset.filter(
+ Q(resolution_notes__isnull=True) | ~Q(resolution_notes__deleted_at=None)
+ ).distinct()
+ return queryset
+
+
+class CustomSearchFilter(SearchFilter):
+ def must_call_distinct(self, queryset, search_fields):
+ """
+ Return True if 'distinct()' should be used to query the given lookups.
+ """
+ for search_field in search_fields:
+ opts = queryset.model._meta
+ if search_field[0] in self.lookup_prefixes:
+ search_field = search_field[1:]
+
+ # From https://github.com/encode/django-rest-framework/pull/6240/files#diff-01f357e474dd8fd702e4951b9227bffcR88
+ # Annotated fields do not need to be distinct
+ if isinstance(queryset, models.QuerySet) and search_field in queryset.query.annotations:
+ continue
+
+ parts = search_field.split(LOOKUP_SEP)
+ for part in parts:
+ field = opts.get_field(part)
+ if hasattr(field, "get_path_info"):
+ # This field is a relation, update opts to follow the relation
+ path_info = field.get_path_info()
+ opts = path_info[-1].to_opts
+ if any(path.m2m for path in path_info):
+ # This field is a m2m relation so we know we need to call distinct
+ return True
+ return False
+
+
+class AlertGroupView(
+ PreviewTemplateMixin,
+ PublicPrimaryKeyMixin,
+ mixins.RetrieveModelMixin,
+ mixins.ListModelMixin,
+ viewsets.GenericViewSet,
+):
+ authentication_classes = (
+ MobileAppAuthTokenAuthentication,
+ PluginAuthentication,
+ )
+ permission_classes = (IsAuthenticated, ActionPermission)
+
+ action_permissions = {
+ IsAdminOrEditor: (
+ *MODIFY_ACTIONS,
+ "acknowledge",
+ "unacknowledge",
+ "resolve",
+ "unresolve",
+ "attach",
+ "unattach",
+ "silence",
+ "unsilence",
+ "bulk_action",
+ "preview_template",
+ ),
+ AnyRole: (
+ *READ_ACTIONS,
+ "stats",
+ "filters",
+ "silence_options",
+ "bulk_action_options",
+ ),
+ }
+
+ http_method_names = ["get", "post"]
+
+ serializer_class = AlertGroupSerializer
+
+ pagination_class = FiftyPageSizePaginator
+
+ filter_backends = [CustomSearchFilter, filters.DjangoFilterBackend]
+ search_fields = ["cached_render_for_web_str"]
+
+ filterset_class = AlertGroupFilter
+
+ def list(self, request, *args, **kwargs):
+ """
+        This endpoint is compute-heavy, so we rely on the cache here.
+        Attention: make sure to invalidate the cache if you update the response format!
+ """
+ queryset = self.filter_queryset(self.get_queryset(eager=False, readonly=True))
+
+ page = self.paginate_queryset(queryset)
+ skip_slow_rendering = request.query_params.get("skip_slow_rendering") == "true"
+ data = []
+
+ for alert_group in page:
+ if alert_group.cached_render_for_web == {}:
+                # We can't return empty data to the web, so we cache synchronously here.
+ if skip_slow_rendering:
+                    # Return dummy data instead. Caching is not started here because a regular
+                    # request (which will start caching) is expected to follow the
+                    # skip_slow_rendering one.
+ data.append({"pk": alert_group.pk, "short": True})
+ else:
+ # Synchronously cache and return. It could be slow.
+ alert_group.cache_for_web(alert_group.channel.organization)
+ data.append(alert_group.cached_render_for_web)
+ else:
+ data.append(alert_group.cached_render_for_web)
+ if not skip_slow_rendering:
+                    # Refresh the cache. This is skipped for skip_slow_rendering requests because
+                    # a regular request (which will start caching) is expected to follow.
+ alert_group.schedule_cache_for_web()
+
+ return self.get_paginated_response(data)
+
+ def get_queryset(self, eager=True, readonly=False, order=True):
+ if readonly:
+ queryset = AlertGroup.unarchived_objects.using_readonly_db
+ else:
+ queryset = AlertGroup.unarchived_objects
+
+ queryset = queryset.filter(
+ channel__organization=self.request.auth.organization,
+ channel__team=self.request.user.current_team,
+ )
+
+ if order:
+ queryset = queryset.order_by("-started_at")
+
+ queryset = queryset.annotate(cached_render_for_web_str=Cast("cached_render_for_web", output_field=CharField()))
+
+ if eager:
+ queryset = self.serializer_class.setup_eager_loading(queryset)
+ return queryset
+
+ def get_alert_groups_and_days_for_previous_same_period(self):
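+        # Worked example (illustrative dates): for ?started_at=2022-01-08T00:00:00/2022-01-15T00:00:00
+        # the delta is 7 days, so the "previous same period" queried is 2022-01-01 .. 2022-01-08.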
+ prev_alert_groups = AlertGroup.unarchived_objects.none()
+ delta_days = None
+
+ started_at = self.request.query_params.get("started_at", None)
+ if started_at is not None:
+ started_at_gte, started_at_lte = AlertGroupFilter.parse_custom_datetime_range(started_at)
+ delta_days = None
+ if started_at_lte is not None:
+ started_at_lte = forms.DateTimeField().to_python(started_at_lte)
+ else:
+ started_at_lte = datetime.now()
+
+ if started_at_gte is not None:
+ started_at_gte = forms.DateTimeField().to_python(value=started_at_gte)
+ delta = started_at_lte.replace(tzinfo=None) - started_at_gte.replace(tzinfo=None)
+ prev_alert_groups = self.get_queryset().filter(
+ started_at__range=[started_at_gte - delta, started_at_gte]
+ )
+ delta_days = delta.days
+ return prev_alert_groups, delta_days
+
+ @action(detail=False)
+ def stats(self, *args, **kwargs):
+ alert_groups = self.filter_queryset(self.get_queryset(eager=False))
+        # Only the "count" field is used; the other fields are kept for backward compatibility.
+ return Response(
+ {
+ "count": alert_groups.filter().count(),
+ "count_previous_same_period": 0,
+ "alert_group_rate_to_previous_same_period": 1,
+ "count_escalations": 0,
+ "count_escalations_previous_same_period": 0,
+ "escalation_rate_to_previous_same_period": 1,
+ "average_response_time": None,
+ "average_response_time_to_previous_same_period": None,
+ "average_response_time_rate_to_previous_same_period": 0,
+ "prev_period_in_days": 1,
+ }
+ )
+
+ @action(methods=["post"], detail=True)
+ def acknowledge(self, request, pk):
+ alert_group = self.get_object()
+ if alert_group.is_maintenance_incident:
+ raise BadRequest(detail="Can't acknowledge maintenance alert group")
+ if alert_group.root_alert_group is not None:
+ raise BadRequest(detail="Can't acknowledge an attached alert group")
+ alert_group.acknowledge_by_user(self.request.user, action_source=ActionSource.WEB)
+ invalidate_web_cache_for_alert_group(alert_group_pk=alert_group.pk)
+
+ return Response(AlertGroupSerializer(alert_group, context={"request": self.request}).data)
+
+ @action(methods=["post"], detail=True)
+ def unacknowledge(self, request, pk):
+ alert_group = self.get_object()
+ if alert_group.is_maintenance_incident:
+ raise BadRequest(detail="Can't unacknowledge maintenance alert group")
+
+ if alert_group.root_alert_group is not None:
+ raise BadRequest(detail="Can't unacknowledge an attached alert group")
+
+ if not alert_group.acknowledged:
+ raise BadRequest(detail="The alert group is not acknowledged")
+
+ if alert_group.resolved:
+ raise BadRequest(detail="Can't unacknowledge a resolved alert group")
+
+ alert_group.un_acknowledge_by_user(self.request.user, action_source=ActionSource.WEB)
+ invalidate_web_cache_for_alert_group(alert_group_pk=alert_group.pk)
+
+ return Response(AlertGroupSerializer(alert_group, context={"request": self.request}).data)
+
+ @action(methods=["post"], detail=True)
+ def resolve(self, request, pk):
+ alert_group = self.get_object()
+ organization = self.request.user.organization
+
+ if alert_group.root_alert_group is not None:
+ raise BadRequest(detail="Can't resolve an attached alert group")
+
+ if alert_group.is_maintenance_incident:
+ alert_group.stop_maintenance(self.request.user)
+ else:
+ if organization.is_resolution_note_required and not alert_group.has_resolution_notes:
+ return Response(
+ data="Alert group without resolution note cannot be resolved due to organization settings.",
+ status=status.HTTP_400_BAD_REQUEST,
+ )
+ alert_group.resolve_by_user(self.request.user, action_source=ActionSource.WEB)
+ invalidate_web_cache_for_alert_group(alert_group_pk=alert_group.pk)
+ return Response(AlertGroupSerializer(alert_group, context={"request": self.request}).data)
+
+ @action(methods=["post"], detail=True)
+ def unresolve(self, request, pk):
+ alert_group = self.get_object()
+ if alert_group.is_maintenance_incident:
+ raise BadRequest(detail="Can't unresolve maintenance alert group")
+
+ if alert_group.root_alert_group is not None:
+ raise BadRequest(detail="Can't unresolve an attached alert group")
+
+ if not alert_group.resolved:
+ raise BadRequest(detail="The alert group is not resolved")
+
+ alert_group.un_resolve_by_user(self.request.user, action_source=ActionSource.WEB)
+ invalidate_web_cache_for_alert_group(alert_group_pk=alert_group.pk)
+ return Response(AlertGroupSerializer(alert_group, context={"request": self.request}).data)
+
+ @action(methods=["post"], detail=True)
+ def attach(self, request, pk=None):
+ alert_group = self.get_object()
+ if alert_group.is_maintenance_incident:
+ raise BadRequest(detail="Can't attach maintenance alert group")
+ if alert_group.dependent_alert_groups.count() > 0:
+ raise BadRequest(detail="Can't attach an alert group because it has another alert groups attached to it")
+ if not alert_group.is_root_alert_group:
+ raise BadRequest(detail="Can't attach an alert group because it has already been attached")
+
+ try:
+ root_alert_group = self.get_queryset().get(public_primary_key=request.data["root_alert_group_pk"])
+ except AlertGroup.DoesNotExist:
+ return Response(status=status.HTTP_400_BAD_REQUEST)
+ if root_alert_group.resolved or root_alert_group.root_alert_group is not None:
+ return Response(status=status.HTTP_400_BAD_REQUEST)
+ if root_alert_group == alert_group:
+ return Response(status=status.HTTP_400_BAD_REQUEST)
+
+ alert_group.attach_by_user(self.request.user, root_alert_group, action_source=ActionSource.WEB)
+ invalidate_web_cache_for_alert_group(alert_group_pk=alert_group.pk)
+ invalidate_web_cache_for_alert_group(alert_group_pk=root_alert_group.pk)
+ return Response(AlertGroupSerializer(alert_group, context={"request": self.request}).data)
+
+ @action(methods=["post"], detail=True)
+ def unattach(self, request, pk=None):
+ alert_group = self.get_object()
+ if alert_group.is_maintenance_incident:
+ raise BadRequest(detail="Can't unattach maintenance alert group")
+ if alert_group.is_root_alert_group:
+ raise BadRequest(detail="Can't unattach an alert group because it is not attached")
+ root_alert_group_pk = alert_group.root_alert_group_id
+ alert_group.un_attach_by_user(self.request.user, action_source=ActionSource.WEB)
+ invalidate_web_cache_for_alert_group(alert_group_pk=alert_group.pk)
+ invalidate_web_cache_for_alert_group(alert_group_pk=root_alert_group_pk)
+ return Response(AlertGroupSerializer(alert_group, context={"request": self.request}).data)
+
+ @action(methods=["post"], detail=True)
+ def silence(self, request, pk=None):
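+        # Illustrative request (the delay value is an assumption; valid values come from
+        # AlertGroup.SILENCE_DELAY_OPTIONS, see silence_options below):
+        # POST .../alertgroups/<pk>/silence/  {"delay": 1800}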
+ alert_group = self.get_object()
+
+ delay = request.data.get("delay")
+ if delay is None:
+ raise BadRequest(detail="Please specify a delay for silence")
+
+ if alert_group.root_alert_group is not None:
+ raise BadRequest(detail="Can't silence an attached alert group")
+
+ alert_group.silence_by_user(request.user, silence_delay=delay, action_source=ActionSource.WEB)
+ invalidate_web_cache_for_alert_group(alert_group_pk=alert_group.pk)
+ return Response(AlertGroupSerializer(alert_group, context={"request": request}).data)
+
+ @action(methods=["get"], detail=False)
+ def silence_options(self, request):
+ data = [
+ {"value": value, "display_name": display_name} for value, display_name in AlertGroup.SILENCE_DELAY_OPTIONS
+ ]
+ return Response(data)
+
+ @action(methods=["post"], detail=True)
+ def unsilence(self, request, pk=None):
+ alert_group = self.get_object()
+
+ if not alert_group.silenced:
+ raise BadRequest(detail="The alert group is not silenced")
+
+ if alert_group.resolved:
+ raise BadRequest(detail="Can't unsilence a resolved alert group")
+
+ if alert_group.acknowledged:
+ raise BadRequest(detail="Can't unsilence an acknowledged alert group")
+
+ if alert_group.root_alert_group is not None:
+ raise BadRequest(detail="Can't unsilence an attached alert group")
+
+ alert_group.un_silence_by_user(request.user, action_source=ActionSource.WEB)
+
+ return Response(AlertGroupSerializer(alert_group, context={"request": request}).data)
+
+ @action(methods=["get"], detail=False)
+ def filters(self, request):
+ filter_name = request.query_params.get("search", None)
+ api_root = "/api/internal/v1/"
+
+ now = timezone.now()
+ week_ago = now - timedelta(days=7)
+
+ default_datetime_range = "{}/{}".format(
+ week_ago.strftime(DateRangeFilterMixin.DATE_FORMAT),
+ now.strftime(DateRangeFilterMixin.DATE_FORMAT),
+ )
+
+ filter_options = [
+ {"name": "search", "type": "search"},
+ {"name": "integration", "type": "options", "href": api_root + "alert_receive_channels/?filters=true"},
+ {
+ "name": "acknowledged_by",
+ "type": "options",
+ "href": api_root + "users/?filters=true&roles=0&roles=1&roles=2",
+ "default": {"display_name": self.request.user.username, "value": self.request.user.public_primary_key},
+ },
+ {
+ "name": "resolved_by",
+ "type": "options",
+ "href": api_root + "users/?filters=true&roles=0&roles=1&roles=2",
+ },
+ {
+ "name": "silenced_by",
+ "type": "options",
+ "href": api_root + "users/?filters=true&roles=0&roles=1&roles=2",
+ },
+ {
+ "name": "invitees_are",
+ "type": "options",
+ "href": api_root + "users/?filters=true&roles=0&roles=1&roles=2",
+ },
+ {
+ "name": "status",
+ "type": "options",
+ "options": [
+ {"display_name": "new", "value": AlertGroup.NEW},
+ {"display_name": "acknowledged", "value": AlertGroup.ACKNOWLEDGED},
+ {"display_name": "resolved", "value": AlertGroup.RESOLVED},
+ {"display_name": "silenced", "value": AlertGroup.SILENCED},
+ ],
+ },
+ # {'name': 'is_root', 'type': 'boolean', 'default': True},
+ {
+ "name": "started_at",
+ "type": "daterange",
+ "default": default_datetime_range,
+ },
+ {
+ "name": "resolved_at",
+ "type": "daterange",
+ "default": default_datetime_range,
+ },
+ {
+ "name": "with_resolution_note",
+ "type": "boolean",
+ "default": "true",
+ },
+ ]
+
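+        # e.g. ?search=resolved narrows the options to "resolved_by" and "resolved_at"
+        # via substring matching on the filter names above.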
+ if filter_name is not None:
+ filter_options = list(filter(lambda f: filter_name in f["name"], filter_options))
+
+ return Response(filter_options)
+
+ @action(methods=["post"], detail=False)
+ def bulk_action(self, request):
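+        # Illustrative request body ("silence" is assumed to be among AlertGroup.BULK_ACTIONS;
+        # the delay value is an example):
+        # {"alert_group_pks": ["<public_primary_key>", ...], "action": "silence", "delay": 1800}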
+ alert_group_public_pks = self.request.data.get("alert_group_pks", [])
+ action_with_incidents = self.request.data.get("action", None)
+ delay = self.request.data.get("delay")
+ kwargs = {}
+
+ if action_with_incidents not in AlertGroup.BULK_ACTIONS:
+ return Response("Unknown action", status=status.HTTP_400_BAD_REQUEST)
+
+ if action_with_incidents == AlertGroup.SILENCE:
+ if delay is None:
+ raise BadRequest(detail="Please specify a delay for silence")
+ kwargs["silence_delay"] = delay
+
+ alert_groups = self.get_queryset(eager=False).filter(public_primary_key__in=alert_group_public_pks)
+ alert_group_pks = list(alert_groups.values_list("id", flat=True))
+ invalidate_web_cache_for_alert_group(alert_group_pks=alert_group_pks)
+
+ kwargs["user"] = self.request.user
+ kwargs["alert_groups"] = alert_groups
+
+ method = getattr(AlertGroup, f"bulk_{action_with_incidents}")
+ method(**kwargs)
+
+ return Response(status=status.HTTP_200_OK)
+
+ @action(methods=["get"], detail=False)
+ def bulk_action_options(self, request):
+ return Response(
+ [{"value": action_name, "display_name": action_name} for action_name in AlertGroup.BULK_ACTIONS]
+ )
+
+ # This method is required for PreviewTemplateMixin
+ def get_alert_to_template(self):
+ return self.get_object().alerts.first()
diff --git a/engine/apps/api/views/alert_receive_channel.py b/engine/apps/api/views/alert_receive_channel.py
new file mode 100644
index 0000000000..ba20565e19
--- /dev/null
+++ b/engine/apps/api/views/alert_receive_channel.py
@@ -0,0 +1,206 @@
+from django.db.models import Q
+from django_filters import rest_framework as filters
+from django_filters.rest_framework import DjangoFilterBackend
+from rest_framework import status
+from rest_framework.decorators import action
+from rest_framework.filters import SearchFilter
+from rest_framework.permissions import IsAuthenticated
+from rest_framework.response import Response
+from rest_framework.viewsets import ModelViewSet
+
+from apps.alerts.models import AlertReceiveChannel
+from apps.api.permissions import MODIFY_ACTIONS, READ_ACTIONS, ActionPermission, AnyRole, IsAdmin, IsAdminOrEditor
+from apps.api.serializers.alert_receive_channel import (
+ AlertReceiveChannelSerializer,
+ AlertReceiveChannelUpdateSerializer,
+ FilterAlertReceiveChannelSerializer,
+)
+from apps.api.throttlers import DemoAlertThrottler
+from apps.auth_token.auth import PluginAuthentication
+from apps.user_management.organization_log_creator import OrganizationLogType, create_organization_log
+from common.api_helpers.exceptions import BadRequest
+from common.api_helpers.mixins import (
+ FilterSerializerMixin,
+ PreviewTemplateMixin,
+ PublicPrimaryKeyMixin,
+ UpdateSerializerMixin,
+)
+from common.exceptions import TeamCanNotBeChangedError, UnableToSendDemoAlert
+
+
+class AlertReceiveChannelFilter(filters.FilterSet):
+ maintenance_mode = filters.MultipleChoiceFilter(
+ choices=AlertReceiveChannel.MAINTENANCE_MODE_CHOICES, method="filter_maintenance_mode"
+ )
+ integration = filters.ChoiceFilter(choices=AlertReceiveChannel.INTEGRATION_CHOICES)
+
+ class Meta:
+ model = AlertReceiveChannel
+ fields = ["integration", "maintenance_mode", "team"]
+
+ def filter_maintenance_mode(self, queryset, name, value):
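+        # e.g. ?maintenance_mode=0&maintenance_mode=1 ORs the selected modes together; the
+        # integer values of DEBUG_MAINTENANCE / MAINTENANCE are assumptions for illustration.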
+ q_objects = Q()
+ if not value:
+ return queryset
+ for mode in value:
+ try:
+ mode = int(mode)
+ except (ValueError, TypeError):
+ raise BadRequest(detail="Invalid mode value")
+ if mode not in [AlertReceiveChannel.DEBUG_MAINTENANCE, AlertReceiveChannel.MAINTENANCE]:
+ raise BadRequest(detail="Invalid mode value")
+ q_objects |= Q(maintenance_mode=mode)
+
+ queryset = queryset.filter(q_objects)
+
+ return queryset
+
+
+class AlertReceiveChannelView(
+ PreviewTemplateMixin,
+ PublicPrimaryKeyMixin,
+ FilterSerializerMixin,
+ UpdateSerializerMixin,
+ ModelViewSet,
+):
+ authentication_classes = (PluginAuthentication,)
+ permission_classes = (IsAuthenticated, ActionPermission)
+ action_permissions = {
+ IsAdmin: (*MODIFY_ACTIONS, "stop_maintenance", "start_maintenance", "change_team"),
+ IsAdminOrEditor: ("send_demo_alert", "preview_template"),
+ AnyRole: (
+ *READ_ACTIONS,
+ "integration_options",
+ "maintenance_duration_options",
+ "maintenance_mode_options",
+ "counters",
+ "counters_per_integration",
+ ),
+ }
+
+ model = AlertReceiveChannel
+ serializer_class = AlertReceiveChannelSerializer
+ filter_serializer_class = FilterAlertReceiveChannelSerializer
+ update_serializer_class = AlertReceiveChannelUpdateSerializer
+
+ filter_backends = [SearchFilter, DjangoFilterBackend]
+ search_fields = ("verbal_name",)
+
+ filterset_class = AlertReceiveChannelFilter
+
+    def create(self, request, *args, **kwargs):
+        # Use .get() so a missing "integration" key yields a 400 instead of a KeyError.
+        integration = request.data.get("integration")
+        if integration is not None and integration in AlertReceiveChannel.WEB_INTEGRATION_CHOICES:
+            return super().create(request, *args, **kwargs)
+        return Response(data="invalid integration", status=status.HTTP_400_BAD_REQUEST)
+
+ def perform_update(self, serializer):
+ old_state = serializer.instance.repr_settings_for_client_side_logging
+ serializer.save()
+ new_state = serializer.instance.repr_settings_for_client_side_logging
+ description = f"Integration settings was changed from:\n{old_state}\nto:\n{new_state}"
+ create_organization_log(
+ serializer.instance.organization,
+ self.request.user,
+ OrganizationLogType.TYPE_INTEGRATION_CHANGED,
+ description,
+ )
+
+ def perform_destroy(self, instance):
+ description = f"Integration {instance.verbal_name} was deleted"
+ create_organization_log(
+ instance.organization, self.request.user, OrganizationLogType.TYPE_INTEGRATION_DELETED, description
+ )
+ instance.delete()
+
+ def get_queryset(self, eager=True):
+ is_filters_request = self.request.query_params.get("filters", "false") == "true"
+ organization = self.request.auth.organization
+ if is_filters_request:
+ queryset = AlertReceiveChannel.objects_with_maintenance.filter(
+ organization=organization,
+ team=self.request.user.current_team,
+ )
+ else:
+ queryset = AlertReceiveChannel.objects.filter(
+ organization=organization,
+ team=self.request.user.current_team,
+ )
+ if eager:
+ queryset = self.serializer_class.setup_eager_loading(queryset)
+ return queryset
+
+ @action(detail=True, methods=["post"], throttle_classes=[DemoAlertThrottler])
+ def send_demo_alert(self, request, pk):
+ instance = AlertReceiveChannel.objects.get(public_primary_key=pk)
+ try:
+ instance.send_demo_alert()
+ except UnableToSendDemoAlert as e:
+ raise BadRequest(detail=str(e))
+ return Response(status=status.HTTP_200_OK)
+
+ @action(detail=False, methods=["get"])
+ def integration_options(self, request):
+ choices = []
+ for integration_id, integration_title in AlertReceiveChannel.INTEGRATION_CHOICES:
+ if integration_id in AlertReceiveChannel.WEB_INTEGRATION_CHOICES:
+ choice = {
+ "value": integration_id,
+ "display_name": integration_title,
+ "short_description": AlertReceiveChannel.INTEGRATION_SHORT_DESCRIPTION[integration_id],
+ "featured": integration_id in AlertReceiveChannel.INTEGRATION_FEATURED,
+ }
+            # Featured integrations are shown at the beginning of the list
+ if choice["featured"]:
+ choices = [choice] + choices
+ else:
+ choices.append(choice)
+ return Response(choices)
+
+ @action(detail=True, methods=["put"])
+ def change_team(self, request, pk):
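+        # Illustrative call: PUT .../alert_receive_channels/<pk>/change_team/?team_id=null
+        # moves the integration out of any team; any other team_id is passed through as-is.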
+ if "team_id" not in request.query_params:
+ raise BadRequest(detail="team_id must be specified")
+
+ team_id = request.query_params["team_id"]
+ if team_id == "null":
+ team_id = None
+
+ instance = self.get_object()
+
+ try:
+ instance.change_team(team_id=team_id, user=self.request.user)
+ except TeamCanNotBeChangedError as e:
+ raise BadRequest(detail=e)
+
+ return Response()
+
+ @action(methods=["get"], detail=False)
+ def counters(self, request):
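+        # Response shape (counts are illustrative):
+        # {"<integration public_primary_key>": {"alerts_count": 10, "alert_groups_count": 3}, ...}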
+ queryset = self.filter_queryset(self.get_queryset(eager=False))
+ response = {}
+ for alert_receive_channel in queryset:
+ response[alert_receive_channel.public_primary_key] = {
+ "alerts_count": alert_receive_channel.alerts_count,
+ "alert_groups_count": alert_receive_channel.alert_groups_count,
+ }
+ return Response(response)
+
+ @action(methods=["get"], detail=True, url_path="counters")
+ def counters_per_integration(self, request, pk):
+ alert_receive_channel = self.get_object()
+ response = {
+ alert_receive_channel.public_primary_key: {
+ "alerts_count": alert_receive_channel.alerts_count,
+ "alert_groups_count": alert_receive_channel.alert_groups_count,
+ }
+ }
+ return Response(response)
+
+ # This method is required for PreviewTemplateMixin
+ def get_alert_to_template(self):
+ try:
+ return self.get_object().alert_groups.last().alerts.first()
+ except AttributeError:
+ return None
diff --git a/engine/apps/api/views/alert_receive_channel_template.py b/engine/apps/api/views/alert_receive_channel_template.py
new file mode 100644
index 0000000000..2845478da8
--- /dev/null
+++ b/engine/apps/api/views/alert_receive_channel_template.py
@@ -0,0 +1,52 @@
+from rest_framework import mixins, viewsets
+from rest_framework.permissions import IsAuthenticated
+
+from apps.alerts.models import AlertReceiveChannel
+from apps.api.permissions import MODIFY_ACTIONS, READ_ACTIONS, ActionPermission, AnyRole, IsAdmin
+from apps.api.serializers.alert_receive_channel import AlertReceiveChannelTemplatesSerializer
+from apps.auth_token.auth import PluginAuthentication
+from apps.user_management.organization_log_creator import OrganizationLogType, create_organization_log
+from common.api_helpers.mixins import PublicPrimaryKeyMixin
+
+
+class AlertReceiveChannelTemplateView(
+ PublicPrimaryKeyMixin,
+ mixins.RetrieveModelMixin,
+ mixins.UpdateModelMixin,
+ viewsets.GenericViewSet,
+):
+ authentication_classes = (PluginAuthentication,)
+ permission_classes = (IsAuthenticated, ActionPermission)
+
+ action_permissions = {
+ IsAdmin: MODIFY_ACTIONS,
+ AnyRole: READ_ACTIONS,
+ }
+
+ model = AlertReceiveChannel
+ serializer_class = AlertReceiveChannelTemplatesSerializer
+
+ def get_queryset(self):
+ queryset = AlertReceiveChannel.objects.filter(
+ organization=self.request.auth.organization,
+ team=self.request.user.current_team,
+ )
+ return queryset
+
+ def update(self, request, *args, **kwargs):
+ instance = self.get_object()
+ old_state = instance.repr_settings_for_client_side_logging
+ result = super().update(request, *args, **kwargs)
+ instance = self.get_object()
+ new_state = instance.repr_settings_for_client_side_logging
+
+ if new_state != old_state:
+ description = f"Integration settings was changed from:\n{old_state}\nto:\n{new_state}"
+ create_organization_log(
+ instance.organization,
+ self.request.user,
+ OrganizationLogType.TYPE_INTEGRATION_CHANGED,
+ description,
+ )
+
+ return result
diff --git a/engine/apps/api/views/apns_device.py b/engine/apps/api/views/apns_device.py
new file mode 100644
index 0000000000..ad3b817e64
--- /dev/null
+++ b/engine/apps/api/views/apns_device.py
@@ -0,0 +1,7 @@
+from push_notifications.api.rest_framework import APNSDeviceAuthorizedViewSet
+
+from apps.auth_token.auth import MobileAppAuthTokenAuthentication, PluginAuthentication
+
+
+class APNSDeviceAuthorizedViewSet(APNSDeviceAuthorizedViewSet):
+ authentication_classes = (MobileAppAuthTokenAuthentication, PluginAuthentication)
diff --git a/engine/apps/api/views/auth.py b/engine/apps/api/views/auth.py
new file mode 100644
index 0000000000..208898967b
--- /dev/null
+++ b/engine/apps/api/views/auth.py
@@ -0,0 +1,50 @@
+import logging
+from urllib.parse import urljoin
+
+from django.contrib.auth import REDIRECT_FIELD_NAME
+from django.http import HttpResponseRedirect
+from django.views.decorators.cache import never_cache
+from django.views.decorators.csrf import csrf_exempt
+from rest_framework.decorators import api_view, authentication_classes
+from rest_framework.response import Response
+from social_core.actions import do_auth, do_complete
+from social_django.utils import psa
+from social_django.views import _do_login
+
+from apps.auth_token.auth import PluginAuthentication, SlackTokenAuthentication
+
+logger = logging.getLogger(__name__)
+
+
+@api_view(["GET"])
+@authentication_classes([PluginAuthentication])
+@never_cache
+@psa("social:complete")
+def overridden_login_slack_auth(request, backend):
+    # We can't just redirect the frontend here: the JS client makes an API call to this view
+    # and passes tokens along, so it can't follow a redirect.
+    # Instead, we wrap the redirect URL and return it as a string.
+ url_to_redirect_to = do_auth(request.backend, redirect_name=REDIRECT_FIELD_NAME).url
+
+ return Response(url_to_redirect_to, 200)
+
+
+@api_view(["GET"])
+@authentication_classes([SlackTokenAuthentication])
+@never_cache
+@csrf_exempt
+@psa("social:complete")
+def overridden_complete_slack_auth(request, backend, *args, **kwargs):
+ """Authentication complete view"""
+ do_complete(
+ request.backend,
+ _do_login,
+ user=request.user,
+ redirect_name=REDIRECT_FIELD_NAME,
+ request=request,
+ *args,
+ **kwargs,
+ )
+    # We build the frontend URL from the org URL, since multiple stacks could be connected to one backend.
+ return_to = urljoin(request.user.organization.grafana_url, "/a/grafana-oncall-app/?page=chat-ops")
+ return HttpResponseRedirect(return_to)
diff --git a/engine/apps/api/views/channel_filter.py b/engine/apps/api/views/channel_filter.py
new file mode 100644
index 0000000000..9d9cc73d42
--- /dev/null
+++ b/engine/apps/api/views/channel_filter.py
@@ -0,0 +1,140 @@
+from django.db.models import OuterRef, Subquery
+from rest_framework import status
+from rest_framework.decorators import action
+from rest_framework.permissions import IsAuthenticated
+from rest_framework.response import Response
+from rest_framework.viewsets import ModelViewSet
+
+from apps.alerts.models import ChannelFilter
+from apps.api.permissions import MODIFY_ACTIONS, READ_ACTIONS, ActionPermission, AnyRole, IsAdmin, IsAdminOrEditor
+from apps.api.serializers.channel_filter import (
+ ChannelFilterCreateSerializer,
+ ChannelFilterSerializer,
+ ChannelFilterUpdateSerializer,
+)
+from apps.api.throttlers import DemoAlertThrottler
+from apps.auth_token.auth import PluginAuthentication
+from apps.slack.models import SlackChannel
+from apps.user_management.organization_log_creator import OrganizationLogType, create_organization_log
+from common.api_helpers.exceptions import BadRequest
+from common.api_helpers.mixins import CreateSerializerMixin, PublicPrimaryKeyMixin, UpdateSerializerMixin
+from common.exceptions import UnableToSendDemoAlert
+
+
+class ChannelFilterView(PublicPrimaryKeyMixin, CreateSerializerMixin, UpdateSerializerMixin, ModelViewSet):
+ authentication_classes = (PluginAuthentication,)
+ permission_classes = (IsAuthenticated, ActionPermission)
+ action_permissions = {
+ IsAdmin: (*MODIFY_ACTIONS, "move_to_position"),
+ IsAdminOrEditor: ("send_demo_alert",),
+ AnyRole: READ_ACTIONS,
+ }
+
+ model = ChannelFilter
+ serializer_class = ChannelFilterSerializer
+ update_serializer_class = ChannelFilterUpdateSerializer
+ create_serializer_class = ChannelFilterCreateSerializer
+
+ def get_queryset(self):
+ alert_receive_channel_id = self.request.query_params.get("alert_receive_channel", None)
+ lookup_kwargs = {}
+ if alert_receive_channel_id:
+ lookup_kwargs = {"alert_receive_channel__public_primary_key": alert_receive_channel_id}
+
+ slack_channels_subq = SlackChannel.objects.filter(
+ slack_id=OuterRef("slack_channel_id"),
+ slack_team_identity=self.request.auth.organization.slack_team_identity,
+ ).order_by("pk")
+
+ queryset = ChannelFilter.objects.filter(
+ **lookup_kwargs,
+ alert_receive_channel__organization=self.request.auth.organization,
+ alert_receive_channel__team=self.request.user.current_team,
+ alert_receive_channel__deleted_at=None,
+ ).annotate(
+ slack_channel_name=Subquery(slack_channels_subq.values("name")[:1]),
+ slack_channel_pk=Subquery(slack_channels_subq.values("public_primary_key")[:1]),
+ )
+ queryset = self.serializer_class.setup_eager_loading(queryset)
+ return queryset
+
+ def destroy(self, request, *args, **kwargs):
+ user = request.user
+ instance = self.get_object()
+ if instance.is_default:
+ raise BadRequest(detail="Unable to delete default filter")
+ else:
+ alert_receive_channel = instance.alert_receive_channel
+ route_verbal = instance.verbal_name_for_clients.capitalize()
+ description = f"{route_verbal} for integration {alert_receive_channel.verbal_name} was deleted"
+ create_organization_log(
+ user.organization, user, OrganizationLogType.TYPE_CHANNEL_FILTER_DELETED, description
+ )
+ self.perform_destroy(instance)
+ return Response(status=status.HTTP_204_NO_CONTENT)
+
+ def perform_create(self, serializer):
+ user = self.request.user
+ serializer.save()
+ instance = serializer.instance
+ alert_receive_channel = instance.alert_receive_channel
+ route_verbal = instance.verbal_name_for_clients.capitalize()
+ description = f"{route_verbal} was created for integration {alert_receive_channel.verbal_name}"
+ create_organization_log(user.organization, user, OrganizationLogType.TYPE_CHANNEL_FILTER_CREATED, description)
+
+ def perform_update(self, serializer):
+ user = self.request.user
+ old_state = serializer.instance.repr_settings_for_client_side_logging
+ serializer.save()
+ new_state = serializer.instance.repr_settings_for_client_side_logging
+ alert_receive_channel = serializer.instance.alert_receive_channel
+ route_verbal = serializer.instance.verbal_name_for_clients
+ description = (
+ f"Settings for {route_verbal} of integration {alert_receive_channel.verbal_name} "
+ f"was changed from:\n{old_state}\nto:\n{new_state}"
+ )
+ create_organization_log(user.organization, user, OrganizationLogType.TYPE_CHANNEL_FILTER_CHANGED, description)
+
+ @action(detail=True, methods=["put"])
+ def move_to_position(self, request, pk):
+ position = request.query_params.get("position", None)
+ if position is not None:
+ try:
+ source_filter = ChannelFilter.objects.get(public_primary_key=pk)
+ except ChannelFilter.DoesNotExist:
+ raise BadRequest(detail="Channel filter does not exist")
+ try:
+ if source_filter.is_default:
+ raise BadRequest(detail="Unable to change position for default filter")
+ user = self.request.user
+ old_state = source_filter.repr_settings_for_client_side_logging
+
+ source_filter.to(int(position))
+
+ new_state = source_filter.repr_settings_for_client_side_logging
+ alert_receive_channel = source_filter.alert_receive_channel
+ route_verbal = source_filter.verbal_name_for_clients
+ description = (
+ f"Settings for {route_verbal} of integration {alert_receive_channel.verbal_name} "
+ f"was changed from:\n{old_state}\nto:\n{new_state}"
+ )
+ create_organization_log(
+ user.organization,
+ user,
+ OrganizationLogType.TYPE_CHANNEL_FILTER_CHANGED,
+ description,
+ )
+ return Response(status=status.HTTP_200_OK)
+ except ValueError as e:
+ raise BadRequest(detail=f"{e}")
+ else:
+ raise BadRequest(detail="Position was not provided")
+
+ @action(detail=True, methods=["post"], throttle_classes=[DemoAlertThrottler])
+ def send_demo_alert(self, request, pk):
+ instance = ChannelFilter.objects.get(public_primary_key=pk)
+ try:
+ instance.send_demo_alert()
+ except UnableToSendDemoAlert as e:
+ raise BadRequest(detail=str(e))
+ return Response(status=status.HTTP_200_OK)
diff --git a/engine/apps/api/views/custom_button.py b/engine/apps/api/views/custom_button.py
new file mode 100644
index 0000000000..91901f710f
--- /dev/null
+++ b/engine/apps/api/views/custom_button.py
@@ -0,0 +1,94 @@
+from django.core.exceptions import ObjectDoesNotExist
+from rest_framework import status
+from rest_framework.decorators import action
+from rest_framework.exceptions import NotFound
+from rest_framework.permissions import IsAuthenticated
+from rest_framework.response import Response
+from rest_framework.viewsets import ModelViewSet
+
+from apps.alerts.models import AlertGroup, CustomButton
+from apps.alerts.tasks.custom_button_result import custom_button_result
+from apps.api.permissions import MODIFY_ACTIONS, READ_ACTIONS, ActionPermission, AnyRole, IsAdmin, IsAdminOrEditor
+from apps.api.serializers.custom_button import CustomButtonSerializer
+from apps.auth_token.auth import PluginAuthentication
+from apps.user_management.organization_log_creator import OrganizationLogType, create_organization_log
+from common.api_helpers.exceptions import BadRequest
+from common.api_helpers.mixins import PublicPrimaryKeyMixin
+
+
+class CustomButtonView(PublicPrimaryKeyMixin, ModelViewSet):
+ authentication_classes = (PluginAuthentication,)
+ permission_classes = (IsAuthenticated, ActionPermission)
+ action_permissions = {
+ IsAdmin: MODIFY_ACTIONS,
+ IsAdminOrEditor: ("action",),
+ AnyRole: READ_ACTIONS,
+ }
+
+ model = CustomButton
+ serializer_class = CustomButtonSerializer
+
+ def get_queryset(self):
+ queryset = CustomButton.objects.filter(
+ organization=self.request.auth.organization,
+ team=self.request.user.current_team,
+ )
+ return queryset
+
+ def get_object(self):
+        # Overridden because we want to fetch the object from the organization rather than from a concrete team.
+ pk = self.kwargs["pk"]
+ organization = self.request.auth.organization
+
+ try:
+ obj = organization.custom_buttons.get(public_primary_key=pk)
+ except ObjectDoesNotExist:
+ raise NotFound
+
+ # May raise a permission denied
+ self.check_object_permissions(self.request, obj)
+
+ return obj
+
+ def original_get_object(self):
+ return super().get_object()
+
+ def perform_create(self, serializer):
+ serializer.save()
+ instance = serializer.instance
+ organization = self.request.auth.organization
+ user = self.request.user
+ description = f"Custom action {instance.name} was created"
+ create_organization_log(organization, user, OrganizationLogType.TYPE_CUSTOM_ACTION_CREATED, description)
+
+ def perform_update(self, serializer):
+ organization = self.request.auth.organization
+ user = self.request.user
+ old_state = serializer.instance.repr_settings_for_client_side_logging
+ serializer.save()
+ new_state = serializer.instance.repr_settings_for_client_side_logging
+ description = f"Custom action {serializer.instance.name} was changed " f"from:\n{old_state}\nto:\n{new_state}"
+ create_organization_log(organization, user, OrganizationLogType.TYPE_CUSTOM_ACTION_CHANGED, description)
+
+ def perform_destroy(self, instance):
+ organization = self.request.auth.organization
+ user = self.request.user
+ description = f"Custom action {instance.name} was deleted"
+ create_organization_log(organization, user, OrganizationLogType.TYPE_CUSTOM_ACTION_DELETED, description)
+ instance.delete()
+
+ @action(detail=True, methods=["post"])
+ def action(self, request, pk):
+ alert_group_id = request.query_params.get("alert_group", None)
+ if alert_group_id is not None:
+ custom_button = self.original_get_object()
+ try:
+ alert_group = AlertGroup.unarchived_objects.get(
+ public_primary_key=alert_group_id, channel=custom_button.alert_receive_channel
+ )
+ custom_button_result.apply_async((custom_button.pk, alert_group.pk, self.request.user.pk))
+ except AlertGroup.DoesNotExist:
+ raise BadRequest(detail="AlertGroup does not exist or archived")
+ return Response(status=status.HTTP_200_OK)
+ else:
+ raise BadRequest(detail="AlertGroup is required")
diff --git a/engine/apps/api/views/escalation_chain.py b/engine/apps/api/views/escalation_chain.py
new file mode 100644
index 0000000000..839dcc9053
--- /dev/null
+++ b/engine/apps/api/views/escalation_chain.py
@@ -0,0 +1,136 @@
+from django.db.models import Count, Q
+from emoji import emojize
+from rest_framework import viewsets
+from rest_framework.decorators import action
+from rest_framework.filters import SearchFilter
+from rest_framework.permissions import IsAuthenticated
+from rest_framework.response import Response
+
+from apps.alerts.models import EscalationChain
+from apps.api.permissions import MODIFY_ACTIONS, READ_ACTIONS, ActionPermission, AnyRole, IsAdmin
+from apps.api.serializers.escalation_chain import EscalationChainListSerializer, EscalationChainSerializer
+from apps.auth_token.auth import PluginAuthentication
+from apps.user_management.organization_log_creator import OrganizationLogType, create_organization_log
+from common.api_helpers.exceptions import BadRequest
+from common.api_helpers.mixins import ListSerializerMixin, PublicPrimaryKeyMixin
+
+
+class EscalationChainViewSet(PublicPrimaryKeyMixin, ListSerializerMixin, viewsets.ModelViewSet):
+ authentication_classes = (PluginAuthentication,)
+ permission_classes = (IsAuthenticated, ActionPermission)
+
+ action_permissions = {
+ IsAdmin: (*MODIFY_ACTIONS, "copy"),
+ AnyRole: (*READ_ACTIONS, "details"),
+ }
+
+ filter_backends = [SearchFilter]
+ search_fields = ("^name",)
+
+ serializer_class = EscalationChainSerializer
+ list_serializer_class = EscalationChainListSerializer
+
+ def get_queryset(self):
+ queryset = (
+ EscalationChain.objects.filter(
+ organization=self.request.auth.organization,
+ team=self.request.user.current_team,
+ )
+ .annotate(
+ num_integrations=Count(
+ "channel_filters__alert_receive_channel",
+ distinct=True,
+ filter=Q(channel_filters__alert_receive_channel__deleted_at__isnull=True),
+ )
+ )
+ .annotate(
+ num_routes=Count(
+ "channel_filters",
+ distinct=True,
+ filter=Q(channel_filters__alert_receive_channel__deleted_at__isnull=True),
+ )
+ )
+ )
+
+ return queryset
+
+ def perform_create(self, serializer):
+ serializer.save()
+
+ instance = serializer.instance
+ description = f"Escalation chain {instance.name} was created"
+ create_organization_log(
+ instance.organization,
+ self.request.user,
+ OrganizationLogType.TYPE_ESCALATION_CHAIN_CREATED,
+ description,
+ )
+
+ def perform_destroy(self, instance):
+ instance.delete()
+
+ description = f"Escalation chain {instance.name} was deleted"
+ create_organization_log(
+ instance.organization,
+ self.request.user,
+ OrganizationLogType.TYPE_ESCALATION_CHAIN_DELETED,
+ description,
+ )
+
+ def perform_update(self, serializer):
+ instance = serializer.instance
+ old_state = instance.repr_settings_for_client_side_logging
+
+ serializer.save()
+
+ new_state = instance.repr_settings_for_client_side_logging
+ description = f"Escalation chain {instance.name} was changed from:\n{old_state}\nto:\n{new_state}"
+ create_organization_log(
+ instance.organization,
+ self.request.user,
+ OrganizationLogType.TYPE_ESCALATION_CHAIN_CHANGED,
+ description,
+ )
+
+ @action(methods=["post"], detail=True)
+ def copy(self, request, pk):
+ user = request.user
+ name = request.data.get("name")
+ if name is None:
+ raise BadRequest(detail={"name": ["This field may not be null."]})
+ else:
+ if EscalationChain.objects.filter(organization=request.auth.organization, name=name).exists():
+ raise BadRequest(detail={"name": ["Escalation chain with this name already exists."]})
+
+ obj = self.get_object()
+ copy = obj.make_copy(name)
+ serializer = self.get_serializer(copy)
+ description = f"Escalation chain {obj.name} was copied with new name {name}"
+ create_organization_log(copy.organization, user, OrganizationLogType.TYPE_CHANNEL_FILTER_CHANGED, description)
+ return Response(serializer.data)
+
+ @action(methods=["get"], detail=True)
+ def details(self, request, pk):
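+        # Response sketch (illustrative values): one entry per integration, each listing the
+        # routes (channel filters) attached to this chain, e.g.
+        # [{"id": "<integration pk>", "display_name": "Grafana Alerting",
+        #   "channel_filters": [{"id": "<route pk>", "display_name": "Default Route"}]}]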
+ obj = self.get_object()
+ channel_filters = obj.channel_filters.filter(alert_receive_channel__deleted_at__isnull=True).values(
+ "public_primary_key",
+ "filtering_term",
+ "is_default",
+ "alert_receive_channel__public_primary_key",
+ "alert_receive_channel__verbal_name",
+ )
+ data = {}
+ for channel_filter in channel_filters:
+ channel_filter_data = {
+ "display_name": "Default Route" if channel_filter["is_default"] else channel_filter["filtering_term"],
+ "id": channel_filter["public_primary_key"],
+ }
+ data.setdefault(
+ channel_filter["alert_receive_channel__public_primary_key"],
+ {
+ "id": channel_filter["alert_receive_channel__public_primary_key"],
+ "display_name": emojize(channel_filter["alert_receive_channel__verbal_name"], use_aliases=True),
+ "channel_filters": [],
+ },
+ )["channel_filters"].append(channel_filter_data)
+ return Response(data.values())
diff --git a/engine/apps/api/views/escalation_policy.py b/engine/apps/api/views/escalation_policy.py
new file mode 100644
index 0000000000..b67b75426a
--- /dev/null
+++ b/engine/apps/api/views/escalation_policy.py
@@ -0,0 +1,171 @@
+from django.conf import settings
+from django.db.models import Q
+from rest_framework import status
+from rest_framework.decorators import action
+from rest_framework.permissions import IsAuthenticated
+from rest_framework.response import Response
+from rest_framework.viewsets import ModelViewSet
+
+from apps.alerts.models import EscalationPolicy
+from apps.api.permissions import MODIFY_ACTIONS, READ_ACTIONS, ActionPermission, AnyRole, IsAdmin
+from apps.api.serializers.escalation_policy import (
+ EscalationPolicyCreateSerializer,
+ EscalationPolicySerializer,
+ EscalationPolicyUpdateSerializer,
+)
+from apps.auth_token.auth import PluginAuthentication
+from apps.user_management.organization_log_creator import OrganizationLogType, create_organization_log
+from common.api_helpers.exceptions import BadRequest
+from common.api_helpers.mixins import CreateSerializerMixin, PublicPrimaryKeyMixin, UpdateSerializerMixin
+
+
+class EscalationPolicyView(PublicPrimaryKeyMixin, CreateSerializerMixin, UpdateSerializerMixin, ModelViewSet):
+ authentication_classes = (PluginAuthentication,)
+ permission_classes = (IsAuthenticated, ActionPermission)
+ action_permissions = {
+ IsAdmin: (*MODIFY_ACTIONS, "move_to_position"),
+ AnyRole: (
+ *READ_ACTIONS,
+ "escalation_options",
+ "delay_options",
+ "num_minutes_in_window_options",
+ ),
+ }
+
+ model = EscalationPolicy
+ serializer_class = EscalationPolicySerializer
+ update_serializer_class = EscalationPolicyUpdateSerializer
+ create_serializer_class = EscalationPolicyCreateSerializer
+
+ def get_queryset(self):
+ escalation_chain_id = self.request.query_params.get("escalation_chain")
+ user_id = self.request.query_params.get("user")
+ slack_channel_id = self.request.query_params.get("slack_channel")
+ channel_filter_id = self.request.query_params.get("channel_filter")
+
+ lookup_kwargs = {}
+ if escalation_chain_id is not None:
+ lookup_kwargs.update({"escalation_chain__public_primary_key": escalation_chain_id})
+ if user_id is not None:
+ lookup_kwargs.update({"notify_to_users_queue__public_primary_key": user_id})
+ if slack_channel_id is not None:
+ lookup_kwargs.update({"escalation_chain__channel_filters__slack_channel_id": slack_channel_id})
+ if channel_filter_id is not None:
+ lookup_kwargs.update({"escalation_chain__channel_filters__public_primary_key": channel_filter_id})
+
+ queryset = EscalationPolicy.objects.filter(
+ Q(**lookup_kwargs),
+ Q(escalation_chain__organization=self.request.auth.organization),
+ Q(escalation_chain__team=self.request.user.current_team),
+ Q(escalation_chain__channel_filters__alert_receive_channel__deleted_at=None),
+ Q(step__in=EscalationPolicy.INTERNAL_DB_STEPS) | Q(step__isnull=True),
+ ).distinct()
+
+ queryset = self.serializer_class.setup_eager_loading(queryset)
+ return queryset
+
+ def perform_create(self, serializer):
+ serializer.save()
+ instance = serializer.instance
+ organization = self.request.user.organization
+ user = self.request.user
+ description = (
+ f"Escalation step '{instance.step_type_verbal}' with order {instance.order} "
+ f"was created for escalation chain '{instance.escalation_chain.name}'"
+ )
+ create_organization_log(organization, user, OrganizationLogType.TYPE_ESCALATION_STEP_CREATED, description)
+
+ def perform_update(self, serializer):
+ organization = self.request.user.organization
+ user = self.request.user
+ old_state = serializer.instance.repr_settings_for_client_side_logging
+ serializer.save()
+ new_state = serializer.instance.repr_settings_for_client_side_logging
+ escalation_chain_name = serializer.instance.escalation_chain.name
+
+ description = (
+ f"Settings for escalation step of escalation chain '{escalation_chain_name}' "
+ f"was changed from:\n{old_state}\nto:\n{new_state}"
+ )
+ create_organization_log(organization, user, OrganizationLogType.TYPE_ESCALATION_STEP_CHANGED, description)
+
+ def perform_destroy(self, instance):
+ organization = self.request.user.organization
+ user = self.request.user
+ description = (
+ f"Escalation step '{instance.step_type_verbal}' with order {instance.order} of "
+ f"of escalation chain '{instance.escalation_chain.name}' was deleted"
+ )
+ create_organization_log(organization, user, OrganizationLogType.TYPE_ESCALATION_STEP_DELETED, description)
+ instance.delete()
+
+ @action(detail=True, methods=["put"])
+ def move_to_position(self, request, pk):
+ position = request.query_params.get("position", None)
+ if position is not None:
+ try:
+ source_step = EscalationPolicy.objects.get(public_primary_key=pk)
+ except EscalationPolicy.DoesNotExist:
+ raise BadRequest(detail="Step does not exist")
+ try:
+ user = self.request.user
+ old_state = source_step.repr_settings_for_client_side_logging
+
+ position = int(position)
+ source_step.to(position)
+
+ new_state = source_step.repr_settings_for_client_side_logging
+ escalation_chain_name = source_step.escalation_chain.name
+ description = (
+ f"Settings for escalation step of escalation chain '{escalation_chain_name}' "
+ f"was changed from:\n{old_state}\nto:\n{new_state}"
+ )
+ create_organization_log(
+ user.organization,
+ user,
+ OrganizationLogType.TYPE_ESCALATION_STEP_CHANGED,
+ description,
+ )
+
+ return Response(status=status.HTTP_200_OK)
+ except ValueError as e:
+ raise BadRequest(detail=f"{e}")
+
+ else:
+ raise BadRequest(detail="Position was not provided")
+
+ @action(detail=False, methods=["get"])
+ def escalation_options(self, request):
+ choices = []
+ for step in EscalationPolicy.INTERNAL_API_STEPS:
+ verbal = EscalationPolicy.INTERNAL_API_STEPS_TO_VERBAL_MAP[step]
+ can_change_importance = (
+ step in EscalationPolicy.IMPORTANT_STEPS_SET or step in EscalationPolicy.DEFAULT_STEPS_SET
+ )
+ slack_integration_required = step in EscalationPolicy.SLACK_INTEGRATION_REQUIRED_STEPS
+ if slack_integration_required and not settings.FEATURE_SLACK_INTEGRATION_ENABLED:
+ continue
+ choices.append(
+ {
+ "value": step,
+ "display_name": verbal[0],
+ "create_display_name": verbal[1],
+ "slack_integration_required": slack_integration_required,
+ "can_change_importance": can_change_importance,
+ }
+ )
+ return Response(choices)
+
+ @action(detail=False, methods=["get"])
+ def delay_options(self, request):
+ choices = []
+ for item in EscalationPolicy.WEB_DURATION_CHOICES:
+ choices.append({"value": str(item[0]), "sec_value": item[0], "display_name": item[1]})
+ return Response(choices)
+
+ @action(detail=False, methods=["get"])
+ def num_minutes_in_window_options(self, request):
+ choices = [
+ {"value": choice[0], "display_name": choice[1]} for choice in EscalationPolicy.WEB_DURATION_CHOICES_MINUTES
+ ]
+ return Response(choices)
diff --git a/engine/apps/api/views/features.py b/engine/apps/api/views/features.py
new file mode 100644
index 0000000000..6a4285de94
--- /dev/null
+++ b/engine/apps/api/views/features.py
@@ -0,0 +1,51 @@
+from django.apps import apps
+from django.conf import settings
+from rest_framework.response import Response
+from rest_framework.views import APIView
+
+from apps.auth_token.auth import PluginAuthentication
+
+FEATURE_SLACK = "slack"
+FEATURE_TELEGRAM = "telegram"
+FEATURE_LIVE_SETTINGS = "live_settings"
+MOBILE_APP_PUSH_NOTIFICATIONS = "mobile_app"
+
+
+class FeaturesAPIView(APIView):
+ """
+    Return the whitelist of enabled features.
+    This is needed so that features can be disabled for on-prem installations.
+ """
+
+ authentication_classes = (PluginAuthentication,)
+
+ def get(self, request):
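+        # Example response (assuming Slack and Telegram are enabled): ["slack", "telegram"]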
+ return Response(self._get_enabled_features(request))
+
+ def _get_enabled_features(self, request):
+ enabled_features = []
+
+ if settings.FEATURE_SLACK_INTEGRATION_ENABLED:
+ enabled_features.append(FEATURE_SLACK)
+
+ if settings.FEATURE_TELEGRAM_INTEGRATION_ENABLED:
+ enabled_features.append(FEATURE_TELEGRAM)
+
+ if settings.FEATURE_LIVE_SETTINGS_ENABLED:
+ enabled_features.append(FEATURE_LIVE_SETTINGS)
+
+ if settings.MOBILE_APP_PUSH_NOTIFICATIONS_ENABLED:
+ DynamicSetting = apps.get_model("base", "DynamicSetting")
+ mobile_app_settings = DynamicSetting.objects.get_or_create(
+ name="mobile_app_settings",
+ defaults={
+ "json_value": {
+ "org_ids": [],
+ }
+ },
+ )[0]
+
+ if request.auth.organization.pk in mobile_app_settings.json_value["org_ids"]:
+ enabled_features.append(MOBILE_APP_PUSH_NOTIFICATIONS)
+
+ return enabled_features
diff --git a/engine/apps/api/views/gitops.py b/engine/apps/api/views/gitops.py
new file mode 100644
index 0000000000..72ef4133cf
--- /dev/null
+++ b/engine/apps/api/views/gitops.py
@@ -0,0 +1,33 @@
+from rest_framework.permissions import IsAuthenticated
+from rest_framework.response import Response
+from rest_framework.views import APIView
+
+from apps.alerts.terraform_renderer import TerraformFileRenderer, TerraformStateRenderer
+from apps.api.response_renderers import PlainTextRenderer
+from apps.auth_token.auth import PluginAuthentication
+
+
+class TerraformGitOpsView(APIView):
+ authentication_classes = (PluginAuthentication,)
+ permission_classes = (IsAuthenticated,)
+
+ renderer_classes = [PlainTextRenderer]
+
+ def get(self, request):
+ organization = self.request.auth.organization
+ renderer = TerraformFileRenderer(organization)
+ terraform_file = renderer.render_terraform_file()
+ return Response(terraform_file)
+
+
+class TerraformStateView(APIView):
+ authentication_classes = (PluginAuthentication,)
+ permission_classes = (IsAuthenticated,)
+
+ renderer_classes = (PlainTextRenderer,)
+
+ def get(self, request):
+ organization = self.request.auth.organization
+ renderer = TerraformStateRenderer(organization)
+ terraform_state = renderer.render_state()
+ return Response(terraform_state)
diff --git a/engine/apps/api/views/integration_heartbeat.py b/engine/apps/api/views/integration_heartbeat.py
new file mode 100644
index 0000000000..6f27db8b53
--- /dev/null
+++ b/engine/apps/api/views/integration_heartbeat.py
@@ -0,0 +1,78 @@
+from rest_framework import mixins, viewsets
+from rest_framework.decorators import action
+from rest_framework.permissions import IsAuthenticated
+from rest_framework.response import Response
+
+from apps.api.permissions import MODIFY_ACTIONS, READ_ACTIONS, ActionPermission, AnyRole, IsAdmin
+from apps.api.serializers.integration_heartbeat import IntegrationHeartBeatSerializer
+from apps.auth_token.auth import PluginAuthentication
+from apps.heartbeat.models import IntegrationHeartBeat
+from apps.user_management.organization_log_creator import OrganizationLogType, create_organization_log
+from common.api_helpers.mixins import PublicPrimaryKeyMixin
+
+
+class IntegrationHeartBeatView(
+ PublicPrimaryKeyMixin,
+ mixins.RetrieveModelMixin,
+ mixins.ListModelMixin,
+ mixins.CreateModelMixin,
+ mixins.UpdateModelMixin,
+ viewsets.GenericViewSet,
+):
+ authentication_classes = (PluginAuthentication,)
+ permission_classes = (IsAuthenticated, ActionPermission)
+ action_permissions = {
+ IsAdmin: (*MODIFY_ACTIONS, "activate", "deactivate"),
+ AnyRole: (*READ_ACTIONS, "timeout_options"),
+ }
+
+ model = IntegrationHeartBeat
+ serializer_class = IntegrationHeartBeatSerializer
+
+ def get_queryset(self):
+ alert_receive_channel_id = self.request.query_params.get("alert_receive_channel", None)
+ lookup_kwargs = {}
+ if alert_receive_channel_id:
+ lookup_kwargs = {"alert_receive_channel__public_primary_key": alert_receive_channel_id}
+ queryset = IntegrationHeartBeat.objects.filter(
+ **lookup_kwargs,
+ alert_receive_channel__organization=self.request.auth.organization,
+ alert_receive_channel__team=self.request.user.current_team,
+ )
+ queryset = self.serializer_class.setup_eager_loading(queryset)
+ return queryset
+
+ def perform_create(self, serializer):
+ serializer.save()
+ instance = serializer.instance
+ description = f"Heartbeat for integration {instance.alert_receive_channel.verbal_name} was created"
+ create_organization_log(
+ instance.alert_receive_channel.organization,
+ self.request.user,
+ OrganizationLogType.TYPE_HEARTBEAT_CREATED,
+ description,
+ )
+
+ def perform_update(self, serializer):
+ old_state = serializer.instance.repr_settings_for_client_side_logging
+ serializer.save()
+ new_state = serializer.instance.repr_settings_for_client_side_logging
+ alert_receive_channel = serializer.instance.alert_receive_channel
+ description = (
+ f"Settings for heartbeat of integration "
+ f"{alert_receive_channel.verbal_name} was changed "
+ f"from:\n{old_state}\nto:\n{new_state}"
+ )
+ create_organization_log(
+ alert_receive_channel.organization,
+ self.request.user,
+ OrganizationLogType.TYPE_HEARTBEAT_CHANGED,
+ description,
+ )
+
+ @action(detail=False, methods=["get"])
+ def timeout_options(self, request):
+ choices = []
+ for item in IntegrationHeartBeat.TIMEOUT_CHOICES:
+ choices.append({"value": item[0], "display_name": item[1]})
+ return Response(choices)
diff --git a/engine/apps/api/views/live_setting.py b/engine/apps/api/views/live_setting.py
new file mode 100644
index 0000000000..2ed6d72344
--- /dev/null
+++ b/engine/apps/api/views/live_setting.py
@@ -0,0 +1,88 @@
+from contextlib import suppress
+
+from django.conf import settings
+from django.core.exceptions import ImproperlyConfigured
+from django.http import HttpResponse
+from rest_framework import status, viewsets
+from rest_framework.permissions import IsAuthenticated
+from telegram import error
+
+from apps.api.permissions import IsAdmin
+from apps.api.serializers.live_setting import LiveSettingSerializer
+from apps.auth_token.auth import PluginAuthentication
+from apps.base.models import LiveSetting
+from apps.base.utils import live_settings
+from apps.slack.tasks import unpopulate_slack_user_identities
+from apps.telegram.client import TelegramClient
+from apps.telegram.tasks import register_telegram_webhook
+from apps.user_management.models import User
+from common.api_helpers.mixins import PublicPrimaryKeyMixin
+
+
+class LiveSettingViewSet(PublicPrimaryKeyMixin, viewsets.ModelViewSet):
+ serializer_class = LiveSettingSerializer
+ authentication_classes = (PluginAuthentication,)
+ permission_classes = (IsAuthenticated, IsAdmin)
+
+ def dispatch(self, request, *args, **kwargs):
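+        # Hide these endpoints entirely unless live settings are enabled for this installation.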
+ if not settings.FEATURE_LIVE_SETTINGS_ENABLED:
+ return HttpResponse(status=status.HTTP_404_NOT_FOUND)
+
+ return super().dispatch(request, *args, **kwargs)
+
+ def get_queryset(self):
+ LiveSetting.populate_settings_if_needed()
+ return LiveSetting.objects.filter(name__in=LiveSetting.AVAILABLE_NAMES).order_by("name")
+
+ def perform_update(self, serializer):
+ new_value = serializer.validated_data["value"]
+ self._update_hook(new_value)
+
+ super().perform_update(serializer)
+
+ def perform_destroy(self, instance):
+ new_value = instance.default_value
+ self._update_hook(new_value)
+
+ super().perform_destroy(instance)
+
+ def _update_hook(self, new_value):
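+        # Run the side effects that must accompany a change to certain live settings.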
+ instance = self.get_object()
+
+ if instance.name == "TELEGRAM_TOKEN":
+ try:
+ old_token = live_settings.TELEGRAM_TOKEN
+ except ImproperlyConfigured:
+ old_token = None
+
+ if old_token != new_value:
+ self._reset_telegram_integration(new_token=new_value)
+
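+        # Rotating Slack OAuth credentials invalidates cached Slack identities, so drop them asynchronously.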
+ for setting_name in ["SLACK_CLIENT_OAUTH_ID", "SLACK_CLIENT_OAUTH_SECRET"]:
+ if instance.name == setting_name:
+ if getattr(live_settings, setting_name) != new_value:
+ organization = self.request.auth.organization
+ sti = organization.slack_team_identity
+ if sti is not None:
+ unpopulate_slack_user_identities.apply_async((sti.pk, True), countdown=0)
+
+ def _reset_telegram_integration(self, new_token):
+        # tell Telegram to stop delivering updates to the old bot's webhook
+ with suppress(ImproperlyConfigured, error.InvalidToken, error.Unauthorized):
+ old_client = TelegramClient()
+ old_client.api_client.delete_webhook()
+
+        # delete telegram channels for the current organization
+ organization = self.request.auth.organization
+ organization.telegram_channel.all().delete()
+
+        # delete telegram connectors for users in the organization
+ users_with_telegram_connector = User.objects.filter(
+ organization=organization, telegram_connection__isnull=False
+ ).distinct()
+
+ for user in users_with_telegram_connector:
+ user.telegram_connection.delete()
+
+ # tell Telegram to send updates to new bot
+ register_telegram_webhook.delay(token=new_token)
diff --git a/engine/apps/api/views/maintenance.py b/engine/apps/api/views/maintenance.py
new file mode 100644
index 0000000000..bfb4c4f788
--- /dev/null
+++ b/engine/apps/api/views/maintenance.py
@@ -0,0 +1,117 @@
+from rest_framework import status
+from rest_framework.permissions import IsAuthenticated
+from rest_framework.response import Response
+from rest_framework.views import APIView
+
+from apps.alerts.models import AlertReceiveChannel
+from apps.alerts.models.maintainable_object import MaintainableObject
+from apps.api.permissions import IsAdmin
+from apps.auth_token.auth import PluginAuthentication
+from common.api_helpers.exceptions import BadRequest
+from common.exceptions import MaintenanceCouldNotBeStartedError
+
+
+class GetObjectMixin:
+ def get_object(self, request):
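+        # Resolve the maintainable object (the whole organization or a single integration) from the request payload.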
+ organization = request.auth.organization
+        instance_type = request.data.get("type", None)
+
+        if instance_type == "organization":
+            instance = organization
+        elif instance_type == "alert_receive_channel":
+ pk = request.data.get("alert_receive_channel_id", None)
+ if pk is not None:
+ try:
+ instance = AlertReceiveChannel.objects.get(
+ public_primary_key=pk,
+ organization=organization,
+ team=request.user.current_team,
+ )
+ except AlertReceiveChannel.DoesNotExist:
+ raise BadRequest(detail={"alert_receive_channel_id": ["unknown id"]})
+ else:
+ raise BadRequest(detail={"alert_receive_channel_id": ["id is required"]})
+ else:
+ raise BadRequest(detail={"type": ["Unknown type"]})
+
+ return instance
+
+
+class MaintenanceAPIView(APIView):
+ authentication_classes = (PluginAuthentication,)
+ permission_classes = (IsAuthenticated,)
+
+ def get(self, request):
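+        # Return all ongoing maintenances: the organization itself plus any integrations under maintenance.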
+ organization = self.request.auth.organization
+ response = []
+ integrations_under_maintenance = AlertReceiveChannel.objects.filter(
+ maintenance_mode__isnull=False, organization=organization
+ ).order_by("maintenance_started_at")
+
+ if organization.maintenance_mode is not None:
+ response.append(
+ {
+ "organization_id": organization.public_primary_key,
+ "type": "organization",
+ "maintenance_mode": organization.maintenance_mode,
+ "maintenance_till_timestamp": organization.till_maintenance_timestamp,
+ "started_at_timestamp": organization.started_at_timestamp,
+ }
+ )
+
+ for i in integrations_under_maintenance:
+ response.append(
+ {
+ "alert_receive_channel_id": i.public_primary_key,
+ "type": "alert_receive_channel",
+ "maintenance_mode": i.maintenance_mode,
+ "maintenance_till_timestamp": i.till_maintenance_timestamp,
+ "started_at_timestamp": i.started_at_timestamp,
+ }
+ )
+
+ return Response(response, status=200)
+
+
+class MaintenanceStartAPIView(GetObjectMixin, APIView):
+ authentication_classes = (PluginAuthentication,)
+ permission_classes = (IsAuthenticated, IsAdmin)
+
+ def post(self, request):
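+        # Validate mode and duration before putting the object into maintenance.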
+ mode = request.data.get("mode", None)
+ duration = request.data.get("duration", None)
+ try:
+ mode = int(mode)
+ except (ValueError, TypeError):
+ raise BadRequest(detail={"mode": ["Invalid mode"]})
+ if mode not in [MaintainableObject.DEBUG_MAINTENANCE, MaintainableObject.MAINTENANCE]:
+ raise BadRequest(detail={"mode": ["Unknown mode"]})
+ try:
+ duration = int(duration)
+ except (ValueError, TypeError):
+ raise BadRequest(detail={"duration": ["Invalid duration"]})
+ if duration not in MaintainableObject.maintenance_duration_options_in_seconds():
+ raise BadRequest(detail={"mode": ["Unknown duration"]})
+
+ instance = self.get_object(request)
+ try:
+ instance.start_maintenance(mode, duration, request.user)
+ except MaintenanceCouldNotBeStartedError as e:
+            if isinstance(instance, AlertReceiveChannel):
+ detail = {"alert_receive_channel_id": ["Already on maintenance"]}
+ else:
+ detail = str(e)
+ raise BadRequest(detail=detail)
+
+ return Response(status=status.HTTP_200_OK)
+
+
+class MaintenanceStopAPIView(GetObjectMixin, APIView):
+ authentication_classes = (PluginAuthentication,)
+ permission_classes = (IsAuthenticated, IsAdmin)
+
+ def post(self, request):
+ instance = self.get_object(request)
+ user = request.user
+ instance.force_disable_maintenance(user)
+ return Response(status=status.HTTP_200_OK)
diff --git a/engine/apps/api/views/organization.py b/engine/apps/api/views/organization.py
new file mode 100644
index 0000000000..70af2f1dc4
--- /dev/null
+++ b/engine/apps/api/views/organization.py
@@ -0,0 +1,93 @@
+from contextlib import suppress
+
+from django.apps import apps
+from rest_framework import status
+from rest_framework.permissions import IsAuthenticated
+from rest_framework.response import Response
+from rest_framework.views import APIView
+
+from apps.api.permissions import AnyRole, IsAdmin, MethodPermission
+from apps.api.serializers.organization import CurrentOrganizationSerializer
+from apps.auth_token.auth import PluginAuthentication
+from apps.base.messaging import get_messaging_backend_from_id
+from apps.telegram.client import TelegramClient
+from apps.user_management.organization_log_creator import OrganizationLogType, create_organization_log
+
+
+class CurrentOrganizationView(APIView):
+ authentication_classes = (PluginAuthentication,)
+ permission_classes = (IsAuthenticated, MethodPermission)
+
+ method_permissions = {IsAdmin: ("PUT",), AnyRole: ("GET",)}
+
+ def get(self, request):
+ organization = request.auth.organization
+ serializer = CurrentOrganizationSerializer(organization, context={"request": request})
+ return Response(serializer.data)
+
+ def put(self, request):
+ organization = self.request.auth.organization
+ old_state = organization.repr_settings_for_client_side_logging
+ serializer = CurrentOrganizationSerializer(
+ instance=organization, data=request.data, context={"request": request}
+ )
+ serializer.is_valid(raise_exception=True)
+ serializer.save()
+ new_state = serializer.instance.repr_settings_for_client_side_logging
+ description = f"Organization settings was changed from:\n{old_state}\nto:\n{new_state}"
+ create_organization_log(
+ organization, request.user, OrganizationLogType.TYPE_ORGANIZATION_SETTINGS_CHANGED, description
+ )
+ return Response(serializer.data)
+
+
+class GetTelegramVerificationCode(APIView):
+ authentication_classes = (PluginAuthentication,)
+ permission_classes = (IsAuthenticated, IsAdmin)
+
+ def get(self, request):
+ organization = request.auth.organization
+ user = request.user
+ TelegramChannelVerificationCode = apps.get_model("telegram", "TelegramChannelVerificationCode")
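+        # Drop any previously issued code so that only the newest one is valid.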
+ with suppress(TelegramChannelVerificationCode.DoesNotExist):
+ existing_verification_code = organization.telegram_verification_code
+ existing_verification_code.delete()
+ new_code = TelegramChannelVerificationCode.objects.create(organization=organization, author=user)
+ telegram_client = TelegramClient()
+ bot_username = telegram_client.api_client.username
+ bot_link = f"https://t.me/{bot_username}"
+ return Response({"telegram_code": str(new_code.uuid), "bot_link": bot_link}, status=status.HTTP_200_OK)
+
+
+class GetChannelVerificationCode(APIView):
+ authentication_classes = (PluginAuthentication,)
+ permission_classes = (IsAuthenticated, IsAdmin)
+
+ def get(self, request):
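+        # The backend id comes from the ?backend= query param; unknown ids yield a 400.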
+ organization = request.auth.organization
+ backend_id = request.query_params.get("backend")
+ backend = get_messaging_backend_from_id(backend_id)
+ if backend is None:
+ return Response(status=status.HTTP_400_BAD_REQUEST)
+
+ code = backend.generate_channel_verification_code(organization)
+ return Response(code)
+
+
+class SetGeneralChannel(APIView):
+ authentication_classes = (PluginAuthentication,)
+ permission_classes = (IsAuthenticated, IsAdmin)
+
+ def post(self, request):
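+        # Point the organization's default Slack log channel at the chosen channel.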
+ SlackChannel = apps.get_model("slack", "SlackChannel")
+ organization = request.auth.organization
+ slack_team_identity = organization.slack_team_identity
+ slack_channel_id = request.data["id"]
+
+ slack_channel = SlackChannel.objects.get(
+ public_primary_key=slack_channel_id, slack_team_identity=slack_team_identity
+ )
+
+ organization.set_general_log_channel(slack_channel.slack_id, slack_channel.name, request.user)
+
+ return Response(status=200)
diff --git a/engine/apps/api/views/organization_log_record.py b/engine/apps/api/views/organization_log_record.py
new file mode 100644
index 0000000000..1d5087ed92
--- /dev/null
+++ b/engine/apps/api/views/organization_log_record.py
@@ -0,0 +1,128 @@
+from datetime import timedelta
+
+from django.db.models import Q
+from django.utils import timezone
+from django_filters import rest_framework as filters
+from django_filters.rest_framework import DjangoFilterBackend
+from rest_framework import mixins, viewsets
+from rest_framework.decorators import action
+from rest_framework.filters import SearchFilter
+from rest_framework.permissions import IsAuthenticated
+from rest_framework.response import Response
+
+from apps.api.serializers.organization_log_record import OrganizationLogRecordSerializer
+from apps.auth_token.auth import PluginAuthentication
+from apps.base.models import OrganizationLogRecord
+from apps.user_management.models import User
+from common.api_helpers.filters import DateRangeFilterMixin, ModelFieldFilterMixin
+from common.api_helpers.paginators import FiftyPageSizePaginator
+
+LABEL_CHOICES = [[label, label] for label in OrganizationLogRecord.LABELS]
+
+
+def get_user_queryset(request):
+ if request is None:
+ return User.objects.none()
+
+ return User.objects.filter(organization=request.user.organization).distinct()
+
+
+class OrganizationLogRecordFilter(DateRangeFilterMixin, ModelFieldFilterMixin, filters.FilterSet):
+
+ author = filters.ModelMultipleChoiceFilter(
+ field_name="author",
+ queryset=get_user_queryset,
+ to_field_name="public_primary_key",
+ method=ModelFieldFilterMixin.filter_model_field.__name__,
+ )
+ created_at = filters.CharFilter(field_name="created_at", method=DateRangeFilterMixin.filter_date_range.__name__)
+ labels = filters.MultipleChoiceFilter(choices=LABEL_CHOICES, method="filter_labels")
+
+ class Meta:
+ model = OrganizationLogRecord
+ fields = ["author", "labels", "created_at"]
+
+ def filter_labels(self, queryset, name, value):
+ if not value:
+ return queryset
+
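+        # AND the selected labels together: a record must carry every one of them.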
+ q_objects = Q()
+ for item in value:
+ q_objects &= Q(_labels__contains=item)
+
+ queryset = queryset.filter(q_objects)
+
+ return queryset
+
+
+class OrganizationLogRecordView(mixins.ListModelMixin, viewsets.GenericViewSet):
+ authentication_classes = (PluginAuthentication,)
+ permission_classes = (IsAuthenticated,)
+
+ serializer_class = OrganizationLogRecordSerializer
+
+ pagination_class = FiftyPageSizePaginator
+
+ filter_backends = (
+ SearchFilter,
+ DjangoFilterBackend,
+ )
+ search_fields = ("description",)
+ filterset_class = OrganizationLogRecordFilter
+
+ def get_queryset(self):
+ queryset = OrganizationLogRecord.objects.filter(organization=self.request.auth.organization).order_by(
+ "-created_at"
+ )
+ queryset = self.serializer_class.setup_eager_loading(queryset)
+ return queryset
+
+ @action(detail=False, methods=["get"])
+ def filters(self, request):
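+        # Describe the filters the frontend should render; filter_name narrows the list by prefix.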
+ filter_name = request.query_params.get("filter_name", None)
+ api_root = "/api/internal/v1/"
+
+ filter_options = [
+ {
+ "name": "search",
+ "type": "search",
+ },
+ {
+ "name": "author",
+ "type": "options",
+ "href": api_root + "users/?filters=true&roles=0&roles=1&roles=2",
+ },
+ {
+ "name": "labels",
+ "type": "options",
+ "options": [
+ {
+ "display_name": label,
+ "value": label,
+ }
+ for label in OrganizationLogRecord.LABELS
+ ],
+ },
+ {
+ "name": "created_at",
+ "type": "daterange",
+ "default": f"{timezone.datetime.now() - timedelta(days=7):%Y-%m-%d/{timezone.datetime.now():%Y-%m-%d}}",
+ },
+ ]
+
+ if filter_name is not None:
+ filter_options = list(filter(lambda f: f["name"].startswith(filter_name), filter_options))
+
+ return Response(filter_options)
+
+ @action(detail=False, methods=["get"])
+ def label_options(self, request):
+ return Response(
+ [
+ {
+ "display_name": label,
+ "value": label,
+ }
+ for label in OrganizationLogRecord.LABELS
+ ]
+ )
diff --git a/engine/apps/api/views/preview_template_options.py b/engine/apps/api/views/preview_template_options.py
new file mode 100644
index 0000000000..1f9729134c
--- /dev/null
+++ b/engine/apps/api/views/preview_template_options.py
@@ -0,0 +1,19 @@
+from rest_framework.permissions import IsAuthenticated
+from rest_framework.response import Response
+from rest_framework.views import APIView
+
+from apps.auth_token.auth import PluginAuthentication
+from common.api_helpers.mixins import NOTIFICATION_CHANNEL_OPTIONS, TEMPLATE_NAME_OPTIONS
+
+
+class PreviewTemplateOptionsView(APIView):
+ authentication_classes = (PluginAuthentication,)
+ permission_classes = (IsAuthenticated,)
+
+ def get(self, request):
+ return Response(
+ {
+ "notification_channel_options": NOTIFICATION_CHANNEL_OPTIONS,
+ "template_name_options": TEMPLATE_NAME_OPTIONS,
+ }
+ )
diff --git a/engine/apps/api/views/public_api_tokens.py b/engine/apps/api/views/public_api_tokens.py
new file mode 100644
index 0000000000..57d0990127
--- /dev/null
+++ b/engine/apps/api/views/public_api_tokens.py
@@ -0,0 +1,55 @@
+from rest_framework import mixins, status, viewsets
+from rest_framework.permissions import IsAuthenticated
+from rest_framework.response import Response
+
+from apps.api.permissions import MODIFY_ACTIONS, READ_ACTIONS, IsAdmin
+from apps.api.serializers.public_api_token import PublicApiTokenSerializer
+from apps.auth_token.auth import PluginAuthentication
+from apps.auth_token.constants import MAX_PUBLIC_API_TOKENS_PER_USER
+from apps.auth_token.models import ApiAuthToken
+from apps.user_management.organization_log_creator import OrganizationLogType, create_organization_log
+from common.api_helpers.exceptions import BadRequest
+
+
+class PublicApiTokenView(
+ mixins.RetrieveModelMixin,
+ mixins.ListModelMixin,
+ mixins.CreateModelMixin,
+ mixins.DestroyModelMixin,
+ viewsets.GenericViewSet,
+):
+ authentication_classes = [PluginAuthentication]
+ permission_classes = [IsAuthenticated]
+
+ action_permissions = {IsAdmin: (*MODIFY_ACTIONS, *READ_ACTIONS)}
+
+ model = ApiAuthToken
+ serializer_class = PublicApiTokenSerializer
+
+ def get_queryset(self):
+ return ApiAuthToken.objects.filter(user=self.request.user, organization=self.request.user.organization)
+
+ def destroy(self, request, *args, **kwargs):
+ user = request.user
+ instance = self.get_object()
+ description = f"API token {instance.name} was revoked"
+ create_organization_log(user.organization, user, OrganizationLogType.TYPE_CHANNEL_FILTER_DELETED, description)
+ self.perform_destroy(instance)
+ return Response(status=status.HTTP_204_NO_CONTENT)
+
+ def create(self, request, *args, **kwargs):
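+        # Enforce the per-user token quota and require a non-empty name before issuing a token.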
+ user = request.user
+ token_name = request.data.get("name")
+
+ if (
+ ApiAuthToken.objects.filter(user=user, organization=user.organization).count()
+ >= MAX_PUBLIC_API_TOKENS_PER_USER
+ ):
+ raise BadRequest("Max token count")
+
+ if token_name is None or token_name == "":
+ raise BadRequest("Invalid token name")
+ instance, token = ApiAuthToken.create_auth_token(user, user.organization, token_name)
+ data = {"id": instance.pk, "token": token, "name": instance.name, "created_at": instance.created_at}
+
+ return Response(data, status=status.HTTP_201_CREATED)
diff --git a/engine/apps/api/views/resolution_note.py b/engine/apps/api/views/resolution_note.py
new file mode 100644
index 0000000000..8400addda8
--- /dev/null
+++ b/engine/apps/api/views/resolution_note.py
@@ -0,0 +1,54 @@
+from rest_framework.permissions import IsAuthenticated
+from rest_framework.viewsets import ModelViewSet
+
+from apps.alerts.models import ResolutionNote
+from apps.alerts.tasks import send_update_resolution_note_signal
+from apps.api.permissions import MODIFY_ACTIONS, READ_ACTIONS, ActionPermission, AnyRole, IsAdminOrEditor
+from apps.api.serializers.resolution_note import ResolutionNoteSerializer, ResolutionNoteUpdateSerializer
+from apps.auth_token.auth import PluginAuthentication
+from common.api_helpers.mixins import PublicPrimaryKeyMixin, UpdateSerializerMixin
+
+
+class ResolutionNoteView(PublicPrimaryKeyMixin, UpdateSerializerMixin, ModelViewSet):
+ authentication_classes = (PluginAuthentication,)
+ permission_classes = (IsAuthenticated, ActionPermission)
+
+ action_permissions = {
+ IsAdminOrEditor: MODIFY_ACTIONS,
+ AnyRole: READ_ACTIONS,
+ }
+
+ model = ResolutionNote
+ serializer_class = ResolutionNoteSerializer
+ update_serializer_class = ResolutionNoteUpdateSerializer
+
+ def get_queryset(self):
+ alert_group_id = self.request.query_params.get("alert_group", None)
+ lookup_kwargs = {}
+ if alert_group_id:
+ lookup_kwargs = {"alert_group__public_primary_key": alert_group_id}
+ queryset = ResolutionNote.objects.filter(
+ **lookup_kwargs,
+ alert_group__channel__organization=self.request.auth.organization,
+ alert_group__channel__team=self.request.user.current_team,
+ )
+ queryset = self.serializer_class.setup_eager_loading(queryset)
+ return queryset
+
+ def dispatch(self, request, *args, **kwargs):
+ result = super().dispatch(request, *args, **kwargs)
+
+ # send signal to update alert group and resolution note
+ method = request.method.lower()
+ if method in ["post", "put", "patch", "delete"]:
+ instance_id = self.kwargs.get("pk") or result.data.get("id")
+ if instance_id:
+ instance = ResolutionNote.objects_with_deleted.filter(public_primary_key=instance_id).first()
+ if instance is not None:
+ send_update_resolution_note_signal.apply_async(
+ kwargs={
+ "alert_group_pk": instance.alert_group.pk,
+ "resolution_note_pk": instance.pk,
+ }
+ )
+ return result
diff --git a/engine/apps/api/views/route_regex_debugger.py b/engine/apps/api/views/route_regex_debugger.py
new file mode 100644
index 0000000000..527684ac43
--- /dev/null
+++ b/engine/apps/api/views/route_regex_debugger.py
@@ -0,0 +1,59 @@
+import json
+import re
+
+from django.db.models import Prefetch
+from rest_framework.permissions import IsAuthenticated
+from rest_framework.response import Response
+from rest_framework.views import APIView
+
+from apps.alerts.incident_appearance.renderers.web_renderer import AlertWebRenderer
+from apps.alerts.models import Alert, AlertGroup
+from apps.auth_token.auth import PluginAuthentication
+from common.api_helpers.exceptions import BadRequest
+
+
+class RouteRegexDebuggerView(APIView):
+ authentication_classes = (PluginAuthentication,)
+ permission_classes = (IsAuthenticated,)
+
+ def get(self, request):
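+        # Dry-run a routing regex against recent alert groups to preview which incidents it would match.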
+ organization = self.request.auth.organization
+ team = self.request.user.current_team
+
+ regex = request.query_params.get("regex", None)
+
+ if regex is None:
+ raise BadRequest(detail={"regex": ["This field is required."]})
+ if regex == "":
+ return Response([])
+ try:
+ re.compile(regex)
+ except re.error:
+ raise BadRequest(detail={"regex": ["Invalid regex."]})
+
+        incidents_matching_regex = []
+        MAX_INCIDENTS_TO_SHOW = 5
+        INCIDENTS_TO_LOOKUP = 100
+        for ag in (
+            AlertGroup.unarchived_objects.prefetch_related(Prefetch("alerts", queryset=Alert.objects.order_by("pk")))
+            .filter(channel__organization=organization, channel__team=team)
+            .order_by("-started_at")[:INCIDENTS_TO_LOOKUP]
+        ):
+            # stop scanning once enough matching incidents have been collected
+            if len(incidents_matching_regex) >= MAX_INCIDENTS_TO_SHOW:
+                break
+            first_alert = ag.alerts.all()[0]
+            if re.search(regex, json.dumps(first_alert.raw_request_data)):
+                if ag.cached_render_for_web:
+                    title = ag.cached_render_for_web["render_for_web"]["title"]
+                else:
+                    title = AlertWebRenderer(first_alert).render()["title"]
+                incidents_matching_regex.append(
+                    {
+                        "title": title,
+                        "pk": ag.public_primary_key,
+                        "payload": first_alert.raw_request_data,
+                        "inside_organization_number": ag.inside_organization_number,
+                    }
+                )
+
+ return Response(incidents_matching_regex)
diff --git a/engine/apps/api/views/schedule.py b/engine/apps/api/views/schedule.py
new file mode 100644
index 0000000000..9bc5554636
--- /dev/null
+++ b/engine/apps/api/views/schedule.py
@@ -0,0 +1,333 @@
+import datetime
+from urllib.parse import urljoin
+
+import pytz
+from django.conf import settings
+from django.core.exceptions import ObjectDoesNotExist
+from django.db.models import OuterRef, Subquery
+from django.db.utils import IntegrityError
+from django.urls import reverse
+from django.utils import dateparse, timezone
+from django.utils.functional import cached_property
+from rest_framework import status
+from rest_framework.decorators import action
+from rest_framework.exceptions import NotFound
+from rest_framework.permissions import IsAuthenticated
+from rest_framework.views import Response
+from rest_framework.viewsets import ModelViewSet
+
+from apps.api.permissions import MODIFY_ACTIONS, READ_ACTIONS, ActionPermission, AnyRole, IsAdmin, IsAdminOrEditor
+from apps.api.serializers.schedule_polymorphic import (
+ PolymorphicScheduleCreateSerializer,
+ PolymorphicScheduleSerializer,
+ PolymorphicScheduleUpdateSerializer,
+)
+from apps.auth_token.auth import PluginAuthentication
+from apps.auth_token.constants import SCHEDULE_EXPORT_TOKEN_NAME
+from apps.auth_token.models import ScheduleExportAuthToken
+from apps.schedules.ical_utils import list_of_oncall_shifts_from_ical
+from apps.schedules.models import OnCallSchedule
+from apps.slack.models import SlackChannel
+from apps.slack.tasks import update_slack_user_group_for_schedules
+from apps.user_management.organization_log_creator import OrganizationLogType, create_organization_log
+from common.api_helpers.exceptions import BadRequest, Conflict
+from common.api_helpers.mixins import CreateSerializerMixin, PublicPrimaryKeyMixin, UpdateSerializerMixin
+
+
+class ScheduleView(PublicPrimaryKeyMixin, CreateSerializerMixin, UpdateSerializerMixin, ModelViewSet):
+ authentication_classes = (PluginAuthentication,)
+ permission_classes = (IsAuthenticated, ActionPermission)
+ action_permissions = {
+ IsAdmin: (
+ *MODIFY_ACTIONS,
+ "reload_ical",
+ ),
+ IsAdminOrEditor: ("export_token",),
+ AnyRole: (
+ *READ_ACTIONS,
+ "events",
+ "notify_empty_oncall_options",
+ "notify_oncall_shift_freq_options",
+ "mention_options",
+ ),
+ }
+
+ queryset = OnCallSchedule.objects.all()
+ serializer_class = PolymorphicScheduleSerializer
+ create_serializer_class = PolymorphicScheduleCreateSerializer
+ update_serializer_class = PolymorphicScheduleUpdateSerializer
+
+ @cached_property
+ def can_update_user_groups(self):
+ """
+        This property needs to be propagated down to serializers,
+ since it makes an API call to Slack and the response should be cached.
+ """
+ slack_team_identity = self.request.auth.organization.slack_team_identity
+
+ if slack_team_identity is None:
+ return False
+
+ user_group = slack_team_identity.usergroups.first()
+ if user_group is None:
+ return False
+
+ return user_group.can_be_updated
+
+ def get_serializer_context(self):
+ context = super().get_serializer_context()
+ context.update({"can_update_user_groups": self.can_update_user_groups})
+ return context
+
+ def get_queryset(self):
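+        # Annotate schedules with their Slack channel name/pk via subqueries to avoid per-row lookups.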
+ organization = self.request.auth.organization
+ slack_channels = SlackChannel.objects.filter(
+ slack_team_identity=organization.slack_team_identity,
+ slack_id=OuterRef("channel"),
+ )
+ queryset = OnCallSchedule.objects.filter(
+ organization=organization,
+ team=self.request.user.current_team,
+ ).annotate(
+ slack_channel_name=Subquery(slack_channels.values("name")[:1]),
+ slack_channel_pk=Subquery(slack_channels.values("public_primary_key")[:1]),
+ )
+ queryset = self.serializer_class.setup_eager_loading(queryset)
+ return queryset
+
+ def get_object(self):
+        # Override get_object to look the schedule up across the whole organization rather than only the current team.
+ pk = self.kwargs["pk"]
+ organization = self.request.auth.organization
+ slack_channels = SlackChannel.objects.filter(
+ slack_team_identity=organization.slack_team_identity,
+ slack_id=OuterRef("channel"),
+ )
+        queryset = organization.oncall_schedules.filter(public_primary_key=pk).annotate(
+ slack_channel_name=Subquery(slack_channels.values("name")[:1]),
+ slack_channel_pk=Subquery(slack_channels.values("public_primary_key")[:1]),
+ )
+
+ try:
+ obj = queryset.get()
+ except ObjectDoesNotExist:
+ raise NotFound
+
+ # May raise a permission denied
+ self.check_object_permissions(self.request, obj)
+
+ return obj
+
+ def original_get_object(self):
+ return super().get_object()
+
+ def perform_create(self, serializer):
+ schedule = serializer.save()
+ if schedule.user_group is not None:
+ update_slack_user_group_for_schedules.apply_async((schedule.user_group.pk,))
+ organization = self.request.auth.organization
+ user = self.request.user
+ description = f"Schedule {schedule.name} was created"
+ create_organization_log(organization, user, OrganizationLogType.TYPE_SCHEDULE_CREATED, description)
+
+ def perform_update(self, serializer):
+ organization = self.request.auth.organization
+ user = self.request.user
+ old_schedule = serializer.instance
+ old_state = old_schedule.repr_settings_for_client_side_logging
+ old_user_group = serializer.instance.user_group
+
+ updated_schedule = serializer.save()
+
+ if old_user_group is not None:
+ update_slack_user_group_for_schedules.apply_async((old_user_group.pk,))
+
+ if updated_schedule.user_group is not None and updated_schedule.user_group != old_user_group:
+ update_slack_user_group_for_schedules.apply_async((updated_schedule.user_group.pk,))
+
+ new_state = updated_schedule.repr_settings_for_client_side_logging
+ description = f"Schedule {updated_schedule.name} was changed from:\n{old_state}\nto:\n{new_state}"
+ create_organization_log(organization, user, OrganizationLogType.TYPE_SCHEDULE_CHANGED, description)
+
+ def perform_destroy(self, instance):
+ organization = self.request.auth.organization
+ user = self.request.user
+ description = f"Schedule {instance.name} was deleted"
+ create_organization_log(organization, user, OrganizationLogType.TYPE_SCHEDULE_DELETED, description)
+ instance.delete()
+
+ if instance.user_group is not None:
+ update_slack_user_group_for_schedules.apply_async((instance.user_group.pk,))
+
+ def get_request_timezone(self):
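+        # Validate the requested timezone and date query params, falling back to UTC and today.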
+ user_tz = self.request.query_params.get("user_tz", "UTC")
+ try:
+ pytz.timezone(user_tz)
+ except pytz.exceptions.UnknownTimeZoneError:
+ raise BadRequest(detail="Invalid tz format")
+ date = timezone.now().date()
+ date_param = self.request.query_params.get("date")
+        if date_param is not None:
+            try:
+                date = dateparse.parse_date(date_param)
+            except ValueError:
+                raise BadRequest(detail="Invalid date format")
+            # dateparse.parse_date returns None for malformed input instead of raising
+            if date is None:
+                raise BadRequest(detail="Invalid date format")
+
+ return user_tz, date
+
+ @action(detail=True, methods=["get"])
+ def events(self, request, pk):
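+        # Serialize the schedule's ical shifts into calendar events for the web UI.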
+ user_tz, date = self.get_request_timezone()
+ with_empty = self.request.query_params.get("with_empty", False) == "true"
+ with_gap = self.request.query_params.get("with_gap", False) == "true"
+ schedule = self.original_get_object()
+ shifts = list_of_oncall_shifts_from_ical(schedule, date, user_tz, with_empty, with_gap) or []
+ events_result = []
+ for shift in shifts:
+ all_day = type(shift["start"]) == datetime.date
+ is_gap = shift.get("is_gap", False)
+ shift_json = {
+ "all_day": all_day,
+ "start": shift["start"],
+ # fix confusing end date for all-day event
+ "end": shift["end"] - timezone.timedelta(days=1) if all_day else shift["end"],
+ "users": [
+ {
+ "display_name": user.username,
+ "pk": user.public_primary_key,
+ }
+ for user in shift["users"]
+ ],
+ "priority_level": shift["priority"] if shift["priority"] != 0 else None,
+ "source": shift["source"],
+ "calendar_type": shift["calendar_type"],
+ "is_empty": len(shift["users"]) == 0 and not is_gap,
+ "is_gap": is_gap,
+ }
+ events_result.append(shift_json)
+
+ slack_channel = (
+ {
+ "id": schedule.slack_channel_pk,
+ "slack_id": schedule.channel,
+ "display_name": schedule.slack_channel_name,
+ }
+ if schedule.channel is not None
+ else None
+ )
+
+ result = {
+ "id": schedule.public_primary_key,
+ "name": schedule.name,
+ "type": PolymorphicScheduleSerializer().to_resource_type(schedule),
+ "slack_channel": slack_channel,
+ "events": events_result,
+ }
+ return Response(result, status=status.HTTP_200_OK)
+
+ @action(detail=False, methods=["get"])
+ def type_options(self, request):
+        # TODO: check if this endpoint is still needed
+ choices = []
+ for item in OnCallSchedule.SCHEDULE_CHOICES:
+ choices.append({"value": str(item[0]), "display_name": item[1]})
+ return Response(choices)
+
+ @action(detail=True, methods=["post"])
+ def reload_ical(self, request, pk):
+ schedule = self.original_get_object()
+ schedule.drop_cached_ical()
+ schedule.check_empty_shifts_for_next_week()
+ schedule.check_gaps_for_next_week()
+
+ if schedule.user_group is not None:
+ update_slack_user_group_for_schedules.apply_async((schedule.user_group.pk,))
+
+ return Response(status=status.HTTP_200_OK)
+
+ @action(detail=True, methods=["get", "post", "delete"])
+ def export_token(self, request, pk):
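+        # GET reports the token's status, POST issues a new export token, DELETE revokes it.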
+ schedule = self.original_get_object()
+
+ if self.request.method == "GET":
+ try:
+ token = ScheduleExportAuthToken.objects.get(user_id=self.request.user.id, schedule_id=schedule.id)
+ except ScheduleExportAuthToken.DoesNotExist:
+ raise NotFound
+
+ response = {
+ "created_at": token.created_at,
+ "revoked_at": token.revoked_at,
+ "active": token.active,
+ }
+
+ return Response(response, status=status.HTTP_200_OK)
+
+ if self.request.method == "POST":
+ try:
+ instance, token = ScheduleExportAuthToken.create_auth_token(
+ request.user, request.user.organization, schedule
+ )
+ except IntegrityError:
+ raise Conflict("Schedule export token for user already exists")
+
+ export_url = urljoin(
+ settings.BASE_URL,
+ reverse("api-public:schedules-export", kwargs={"pk": schedule.public_primary_key})
+ + f"?{SCHEDULE_EXPORT_TOKEN_NAME}={token}",
+ )
+
+ data = {"token": token, "created_at": instance.created_at, "export_url": export_url}
+
+ return Response(data, status=status.HTTP_201_CREATED)
+
+ if self.request.method == "DELETE":
+ try:
+ token = ScheduleExportAuthToken.objects.get(user_id=self.request.user.id, schedule_id=schedule.id)
+ token.delete()
+ except ScheduleExportAuthToken.DoesNotExist:
+ raise NotFound
+
+ return Response(status=status.HTTP_204_NO_CONTENT)
+
+ @action(detail=False, methods=["get"])
+ def notify_oncall_shift_freq_options(self, request):
+ options = []
+ for choice in OnCallSchedule.NotifyOnCallShiftFreq.choices:
+ options.append(
+ {
+ "value": choice[0],
+ "display_name": choice[1],
+ }
+ )
+ return Response(options)
+
+ @action(detail=False, methods=["get"])
+ def notify_empty_oncall_options(self, request):
+ options = []
+ for choice in OnCallSchedule.NotifyEmptyOnCall.choices:
+ options.append(
+ {
+ "value": choice[0],
+ "display_name": choice[1],
+ }
+ )
+ return Response(options)
+
+ @action(detail=False, methods=["get"])
+ def mention_options(self, request):
+ options = [
+ {
+ "value": False,
+ "display_name": "Inform in channel without mention",
+ },
+ {
+ "value": True,
+ "display_name": "Mention person in slack",
+ },
+ ]
+ return Response(options)
diff --git a/engine/apps/api/views/slack_channel.py b/engine/apps/api/views/slack_channel.py
new file mode 100644
index 0000000000..a5237e5e54
--- /dev/null
+++ b/engine/apps/api/views/slack_channel.py
@@ -0,0 +1,32 @@
+from rest_framework import mixins
+from rest_framework.filters import SearchFilter
+from rest_framework.permissions import IsAuthenticated
+from rest_framework.viewsets import GenericViewSet
+
+from apps.api.serializers.slack_channel import SlackChannelSerializer
+from apps.auth_token.auth import PluginAuthentication
+from apps.slack.models import SlackChannel
+from common.api_helpers.mixins import PublicPrimaryKeyMixin
+from common.api_helpers.paginators import HundredPageSizePaginator
+
+
+class SlackChannelView(PublicPrimaryKeyMixin, mixins.ListModelMixin, mixins.RetrieveModelMixin, GenericViewSet):
+ authentication_classes = (PluginAuthentication,)
+ permission_classes = (IsAuthenticated,)
+
+ pagination_class = HundredPageSizePaginator
+
+ model = SlackChannel
+ filter_backends = (SearchFilter,)
+ serializer_class = SlackChannelSerializer
+ search_fields = ["name"]
+
+ def get_queryset(self):
+ organization = self.request.auth.organization
+ slack_team_identity = organization.slack_team_identity
+ queryset = SlackChannel.objects.filter(
+ slack_team_identity=slack_team_identity,
+ is_archived=False,
+ )
+
+ return queryset
diff --git a/engine/apps/api/views/slack_team_settings.py b/engine/apps/api/views/slack_team_settings.py
new file mode 100644
index 0000000000..2afdfe7fdb
--- /dev/null
+++ b/engine/apps/api/views/slack_team_settings.py
@@ -0,0 +1,69 @@
+from rest_framework import views
+from rest_framework.permissions import IsAuthenticated
+from rest_framework.response import Response
+
+from apps.api.permissions import AnyRole, IsAdmin, MethodPermission
+from apps.api.serializers.organization_slack_settings import OrganizationSlackSettingsSerializer
+from apps.auth_token.auth import PluginAuthentication
+from apps.user_management.models import Organization
+from apps.user_management.organization_log_creator import OrganizationLogType, create_organization_log
+
+
+class SlackTeamSettingsAPIView(views.APIView):
+ authentication_classes = (PluginAuthentication,)
+ permission_classes = (IsAuthenticated, MethodPermission)
+
+ method_permissions = {
+ IsAdmin: ("PUT",),
+ AnyRole: ("GET",),
+ }
+
+ serializer_class = OrganizationSlackSettingsSerializer
+
+ def get(self, request):
+ organization = self.request.auth.organization
+ serializer = self.serializer_class(organization)
+ return Response(serializer.data)
+
+ def put(self, request):
+ organization = self.request.auth.organization
+ old_state = organization.repr_settings_for_client_side_logging
+ serializer = self.serializer_class(organization, data=request.data)
+ serializer.is_valid(raise_exception=True)
+ serializer.save()
+ new_state = serializer.instance.repr_settings_for_client_side_logging
+ description = f"Organization settings was changed from:\n{old_state}\nto:\n{new_state}"
+ create_organization_log(
+ organization, request.user, OrganizationLogType.TYPE_ORGANIZATION_SETTINGS_CHANGED, description
+ )
+ return Response(serializer.data)
+
+
+class AcknowledgeReminderOptionsAPIView(views.APIView):
+ authentication_classes = (PluginAuthentication,)
+ permission_classes = (IsAuthenticated,)
+
+ def get(self, request):
+ choices = []
+ for item in Organization.ACKNOWLEDGE_REMIND_CHOICES:
+ choices.append(
+ {"value": item[0], "sec_value": Organization.ACKNOWLEDGE_REMIND_DELAY[item[0]], "display_name": item[1]}
+ )
+ return Response(choices)
+
+
+class UnAcknowledgeTimeoutOptionsAPIView(views.APIView):
+ authentication_classes = (PluginAuthentication,)
+ permission_classes = (IsAuthenticated,)
+
+ def get(self, request):
+ choices = []
+ for item in Organization.UNACKNOWLEDGE_TIMEOUT_CHOICES:
+ choices.append(
+ {
+ "value": item[0],
+ "sec_value": Organization.UNACKNOWLEDGE_TIMEOUT_DELAY[item[0]],
+ "display_name": item[1],
+ }
+ )
+ return Response(choices)
diff --git a/engine/apps/api/views/subscription.py b/engine/apps/api/views/subscription.py
new file mode 100644
index 0000000000..47b0e5f042
--- /dev/null
+++ b/engine/apps/api/views/subscription.py
@@ -0,0 +1,16 @@
+from rest_framework.permissions import IsAuthenticated
+from rest_framework.response import Response
+from rest_framework.views import APIView
+
+from apps.auth_token.auth import PluginAuthentication
+
+
+class SubscriptionView(APIView):
+ authentication_classes = (PluginAuthentication,)
+ permission_classes = (IsAuthenticated,)
+
+ def get(self, request):
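+        # Not implemented here; the lines below are intentionally unreachable.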
+ raise NotImplementedError
+ organization = self.request.auth.organization
+ user = self.request.user
+ return Response(organization.get_subscription_web_report_for_user(user))
diff --git a/engine/apps/api/views/team.py b/engine/apps/api/views/team.py
new file mode 100644
index 0000000000..0a33e71fab
--- /dev/null
+++ b/engine/apps/api/views/team.py
@@ -0,0 +1,24 @@
+from rest_framework import mixins, viewsets
+from rest_framework.permissions import IsAuthenticated
+
+from apps.api.serializers.team import TeamSerializer
+from apps.auth_token.auth import MobileAppAuthTokenAuthentication, PluginAuthentication
+from apps.user_management.models import Team
+
+
+class TeamViewSet(mixins.ListModelMixin, viewsets.GenericViewSet):
+ authentication_classes = (
+ MobileAppAuthTokenAuthentication,
+ PluginAuthentication,
+ )
+ permission_classes = (IsAuthenticated,)
+
+ serializer_class = TeamSerializer
+
+ def get_queryset(self):
+ teams = list(self.request.user.teams.all())
+
+ # dirty hack to render "General" team in team select on the frontend
+ general_team = Team(public_primary_key=None, name="General", email=None, avatar_url=None)
+
+ return teams + [general_team]
diff --git a/engine/apps/api/views/telegram_channels.py b/engine/apps/api/views/telegram_channels.py
new file mode 100644
index 0000000000..73681cf7ae
--- /dev/null
+++ b/engine/apps/api/views/telegram_channels.py
@@ -0,0 +1,48 @@
+from django.apps import apps
+from rest_framework import mixins, status, viewsets
+from rest_framework.decorators import action
+from rest_framework.permissions import IsAuthenticated
+from rest_framework.response import Response
+
+from apps.api.permissions import MODIFY_ACTIONS, READ_ACTIONS, ActionPermission, AnyRole, IsAdmin
+from apps.api.serializers.telegram import TelegramToOrganizationConnectorSerializer
+from apps.auth_token.auth import PluginAuthentication
+from apps.user_management.organization_log_creator import OrganizationLogType, create_organization_log
+from common.api_helpers.mixins import PublicPrimaryKeyMixin
+
+
+class TelegramChannelViewSet(
+ PublicPrimaryKeyMixin,
+ mixins.RetrieveModelMixin,
+ mixins.DestroyModelMixin,
+ mixins.ListModelMixin,
+ viewsets.GenericViewSet,
+):
+ authentication_classes = (PluginAuthentication,)
+ permission_classes = (IsAuthenticated, ActionPermission)
+
+ action_permissions = {
+ IsAdmin: (*MODIFY_ACTIONS, "set_default"),
+ AnyRole: READ_ACTIONS,
+ }
+
+ serializer_class = TelegramToOrganizationConnectorSerializer
+
+ def get_queryset(self):
+ TelegramToOrganizationConnector = apps.get_model("telegram", "TelegramToOrganizationConnector")
+ return TelegramToOrganizationConnector.objects.filter(organization=self.request.user.organization)
+
+ @action(detail=True, methods=["post"])
+ def set_default(self, request, pk):
+ telegram_channel = self.get_object()
+ telegram_channel.make_channel_default(request.user)
+
+ return Response(status=status.HTTP_200_OK)
+
+ def perform_destroy(self, instance):
+ user = self.request.user
+ organization = user.organization
+
+ description = f"Telegram channel @{instance.channel_name} was disconnected from organization"
+ create_organization_log(organization, user, OrganizationLogType.TYPE_TELEGRAM_CHANNEL_DISCONNECTED, description)
+ instance.delete()
diff --git a/engine/apps/api/views/user.py b/engine/apps/api/views/user.py
new file mode 100644
index 0000000000..ee0a75dec6
--- /dev/null
+++ b/engine/apps/api/views/user.py
@@ -0,0 +1,486 @@
+import logging
+from urllib.parse import urljoin
+
+from django.apps import apps
+from django.conf import settings
+from django.core.exceptions import ObjectDoesNotExist
+from django.db.utils import IntegrityError
+from django.urls import reverse
+from django_filters import rest_framework as filters
+from rest_framework import mixins, status, viewsets
+from rest_framework.decorators import action
+from rest_framework.exceptions import NotFound
+from rest_framework.filters import SearchFilter
+from rest_framework.permissions import IsAuthenticated
+from rest_framework.response import Response
+from rest_framework.views import APIView
+
+from apps.api.permissions import (
+ MODIFY_ACTIONS,
+ READ_ACTIONS,
+ ActionPermission,
+ AnyRole,
+ IsAdminOrEditor,
+ IsOwnerOrAdmin,
+)
+from apps.api.serializers.user import FilterUserSerializer, UserHiddenFieldsSerializer, UserSerializer
+from apps.auth_token.auth import (
+ MobileAppAuthTokenAuthentication,
+ MobileAppVerificationTokenAuthentication,
+ PluginAuthentication,
+)
+from apps.auth_token.constants import SCHEDULE_EXPORT_TOKEN_NAME
+from apps.auth_token.models import UserScheduleExportAuthToken
+from apps.auth_token.models.mobile_app_auth_token import MobileAppAuthToken
+from apps.auth_token.models.mobile_app_verification_token import MobileAppVerificationToken
+from apps.base.messaging import get_messaging_backend_from_id
+from apps.telegram.client import TelegramClient
+from apps.telegram.models import TelegramVerificationCode
+from apps.twilioapp.phone_manager import PhoneManager
+from apps.twilioapp.twilio_client import twilio_client
+from apps.user_management.models import User
+from apps.user_management.organization_log_creator import OrganizationLogType, create_organization_log
+from common.api_helpers.exceptions import Conflict
+from common.api_helpers.mixins import FilterSerializerMixin, PublicPrimaryKeyMixin
+from common.api_helpers.paginators import HundredPageSizePaginator
+from common.constants.role import Role
+
+logger = logging.getLogger(__name__)
+
+
+class CurrentUserView(APIView):
+ authentication_classes = (
+ MobileAppAuthTokenAuthentication,
+ PluginAuthentication,
+ )
+ permission_classes = (IsAuthenticated,)
+
+ def get(self, request):
+ serializer = UserSerializer(request.user, context={"request": self.request})
+ return Response(serializer.data)
+
+ def put(self, request):
+ serializer = UserSerializer(request.user, data=self.request.data, context={"request": self.request})
+ serializer.is_valid(raise_exception=True)
+ serializer.save()
+ return Response(serializer.data)
+
+
+class UserFilter(filters.FilterSet):
+ """
+ https://django-filter.readthedocs.io/en/master/guide/rest_framework.html
+ """
+
+ email = filters.CharFilter(field_name="email", lookup_expr="icontains")
+ roles = filters.MultipleChoiceFilter(field_name="role", choices=Role.choices())
+
+ class Meta:
+ model = User
+ fields = ["email", "roles"]
+
+
+class UserView(
+ PublicPrimaryKeyMixin,
+ FilterSerializerMixin,
+ mixins.RetrieveModelMixin,
+ mixins.UpdateModelMixin,
+ mixins.ListModelMixin,
+ viewsets.GenericViewSet,
+):
+ authentication_classes = (PluginAuthentication,)
+ permission_classes = (IsAuthenticated, ActionPermission)
+
+    # Non-admin users are allowed to list and retrieve users.
+    # The overridden get_serializer_class returns a different serializer
+    # for non-admin users, with sensitive information hidden.
+ action_permissions = {
+ IsAdminOrEditor: (
+ *MODIFY_ACTIONS,
+ "list",
+ "metadata",
+ "verify_number",
+ "forget_number",
+ "get_verification_code",
+ "get_backend_verification_code",
+ "get_telegram_verification_code",
+ "unlink_telegram",
+ "unlink_backend",
+ "make_test_call",
+ "export_token",
+ "mobile_app_verification_token",
+ "mobile_app_auth_token",
+ ),
+ AnyRole: ("retrieve",),
+ }
+
+ action_object_permissions = {
+ IsOwnerOrAdmin: (
+ *MODIFY_ACTIONS,
+ *READ_ACTIONS,
+ "verify_number",
+ "forget_number",
+ "get_verification_code",
+ "get_backend_verification_code",
+ "get_telegram_verification_code",
+ "unlink_telegram",
+ "unlink_backend",
+ "make_test_call",
+ "export_token",
+ "mobile_app_verification_token",
+ "mobile_app_auth_token",
+ ),
+ }
+
+ filter_serializer_class = FilterUserSerializer
+
+ pagination_class = HundredPageSizePaginator
+
+ filter_backends = (SearchFilter, filters.DjangoFilterBackend)
+    # NB: the prefixes below control how SearchFilter matches each field.
+    # '^' Starts-with search.
+    # '=' Exact matches.
+    # '@' Full-text search. (Currently only supported by Django's MySQL backend.)
+ # '$' Regex search.
+ search_fields = (
+ "^email",
+ "^username",
+ "^slack_user_identity__cached_slack_login",
+ "^slack_user_identity__cached_name",
+ )
+
+ filterset_class = UserFilter
+
+ def get_serializer_class(self):
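+        # "filters" requests get a trimmed-down serializer; full data is visible only to admins or the user themselves.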
+ is_filters_request = self.request.query_params.get("filters", "false") == "true"
+ if self.action in ["list"] and is_filters_request:
+ return self.get_filter_serializer_class()
+
+        user_is_retrieving_own_data = (
+            self.action == "retrieve"
+            and self.kwargs.get("pk") is not None
+            and self.kwargs.get("pk") == self.request.user.public_primary_key
+        )
+        if user_is_retrieving_own_data or self.request.user.role == Role.ADMIN:
+ return UserSerializer
+ return UserHiddenFieldsSerializer
+
+ def get_queryset(self):
+ slack_identity = self.request.query_params.get("slack_identity", None) == "true"
+
+ queryset = User.objects.filter(organization=self.request.user.organization)
+
+ if self.request.user.current_team is not None:
+ queryset = queryset.filter(teams=self.request.user.current_team).distinct()
+
+ queryset = self.get_serializer_class().setup_eager_loading(queryset)
+
+ if slack_identity:
+ queryset = queryset.filter(slack_user_identity__isnull=False).distinct()
+
+ return queryset.order_by("id")
+
+ def current(self, request):
+ serializer = UserSerializer(self.get_queryset().get(pk=self.request.user.pk))
+ return Response(serializer.data)
+
+ @action(detail=True, methods=["get"])
+ def get_verification_code(self, request, pk):
+ user = self.get_object()
+ phone_manager = PhoneManager(user)
+ code_sent = phone_manager.send_verification_code()
+
+ if not code_sent:
+ return Response(status=status.HTTP_400_BAD_REQUEST)
+ return Response(status=status.HTTP_200_OK)
+
+ @action(detail=True, methods=["put"])
+ def verify_number(self, request, pk):
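+        # Check the submitted verification code and record the settings change in the organization log.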
+ target_user = self.get_object()
+ code = request.query_params.get("token", None)
+ old_state = target_user.repr_settings_for_client_side_logging
+ phone_manager = PhoneManager(target_user)
+ verified, error = phone_manager.verify_phone_number(code)
+
+ if not verified:
+ return Response(error, status=status.HTTP_400_BAD_REQUEST)
+ organization = request.auth.organization
+ new_state = target_user.repr_settings_for_client_side_logging
+ description = f"User settings for user {target_user.username} was changed from:\n{old_state}\nto:\n{new_state}"
+ create_organization_log(
+ organization,
+ request.user,
+ OrganizationLogType.TYPE_USER_SETTINGS_CHANGED,
+ description,
+ )
+ return Response(status=status.HTTP_200_OK)
+
+ @action(detail=True, methods=["put"])
+ def forget_number(self, request, pk):
+ target_user = self.get_object()
+ old_state = target_user.repr_settings_for_client_side_logging
+ phone_manager = PhoneManager(target_user)
+ forget = phone_manager.forget_phone_number()
+
+ if forget:
+ organization = request.auth.organization
+ new_state = target_user.repr_settings_for_client_side_logging
+ description = (
+ f"User settings for user {target_user.username} was changed from:\n{old_state}\nto:\n{new_state}"
+ )
+ create_organization_log(
+ organization,
+ request.user,
+ OrganizationLogType.TYPE_USER_SETTINGS_CHANGED,
+ description,
+ )
+ return Response(status=status.HTTP_200_OK)
+
+ @action(detail=True, methods=["post"])
+ def make_test_call(self, request, pk):
+ user = self.get_object()
+ phone_number = user.verified_phone_number
+
+ if phone_number is None:
+ return Response(status=status.HTTP_400_BAD_REQUEST)
+
+ try:
+ twilio_client.make_test_call(to=phone_number)
+ except Exception as e:
+            logger.error(f"Unable to make a test call: {e}")
+ return Response(
+ data="Something went wrong while making a test call", status=status.HTTP_500_INTERNAL_SERVER_ERROR
+ )
+
+ return Response(status=status.HTTP_200_OK)
+
+ @action(detail=True, methods=["get"])
+ def get_backend_verification_code(self, request, pk):
+ backend_id = request.query_params.get("backend")
+ backend = get_messaging_backend_from_id(backend_id)
+ if backend is None:
+ return Response(status=status.HTTP_400_BAD_REQUEST)
+
+ user = self.get_object()
+ code = backend.generate_user_verification_code(user)
+ return Response(code)
+
+ @action(detail=True, methods=["get"])
+ def get_telegram_verification_code(self, request, pk):
+ user = self.get_object()
+
+ if not user.is_telegram_connected:
+ return Response(status=status.HTTP_400_BAD_REQUEST)
+
+ try:
+ existing_verification_code = user.telegram_verification_code
+ existing_verification_code.delete()
+ except TelegramVerificationCode.DoesNotExist:
+ pass
+
+ new_code = TelegramVerificationCode(user=user)
+ new_code.save()
+
+ telegram_client = TelegramClient()
+ bot_username = telegram_client.api_client.username
+ bot_link = f"https://t.me/{bot_username}"
+
+ return Response({"telegram_code": str(new_code.uuid), "bot_link": bot_link}, status=status.HTTP_200_OK)
+
+ @action(detail=True, methods=["post"])
+ def unlink_telegram(self, request, pk):
+ user = self.get_object()
+ TelegramToUserConnector = apps.get_model("telegram", "TelegramToUserConnector")
+
+ try:
+ connector = TelegramToUserConnector.objects.get(user=user)
+ connector.delete()
+ except TelegramToUserConnector.DoesNotExist:
+ return Response(status=status.HTTP_400_BAD_REQUEST)
+
+ description = f"Telegram account of user {user.username} was disconnected"
+ create_organization_log(
+ user.organization,
+ user,
+ OrganizationLogType.TYPE_TELEGRAM_FROM_USER_DISCONNECTED,
+ description,
+ )
+
+ return Response(status=status.HTTP_200_OK)
+
+ @action(detail=True, methods=["post"])
+ def unlink_backend(self, request, pk):
+ backend_id = request.query_params.get("backend")
+ backend = get_messaging_backend_from_id(backend_id)
+ if backend is None:
+ return Response(status=status.HTTP_400_BAD_REQUEST)
+
+ user = self.get_object()
+ try:
+ backend.unlink_user(user)
+ except ObjectDoesNotExist:
+ return Response(status=status.HTTP_400_BAD_REQUEST)
+
+ description = f"{backend.label} account of user {user.username} was disconnected"
+ create_organization_log(
+ user.organization,
+ user,
+ OrganizationLogType.TYPE_MESSAGING_BACKEND_USER_DISCONNECTED,
+ description,
+ )
+
+ return Response(status=status.HTTP_200_OK)
+
+ @action(detail=True, methods=["get", "post", "delete"])
+ def export_token(self, request, pk):
+ user = self.get_object()
+
+ if self.request.method == "GET":
+ try:
+ token = UserScheduleExportAuthToken.objects.get(user=user)
+ except UserScheduleExportAuthToken.DoesNotExist:
+ raise NotFound
+
+ response = {
+ "created_at": token.created_at,
+ "revoked_at": token.revoked_at,
+ "active": token.active,
+ }
+ return Response(response, status=status.HTTP_200_OK)
+
+ if self.request.method == "POST":
+ try:
+ instance, token = UserScheduleExportAuthToken.create_auth_token(user, user.organization)
+ except IntegrityError:
+ raise Conflict("Schedule export token for user already exists")
+
+ export_url = urljoin(
+ settings.BASE_URL,
+ reverse("api-public:users-schedule-export", kwargs={"pk": user.public_primary_key})
+ + f"?{SCHEDULE_EXPORT_TOKEN_NAME}={token}",
+ )
+
+ data = {"token": token, "created_at": instance.created_at, "export_url": export_url}
+ return Response(data, status=status.HTTP_201_CREATED)
+
+ if self.request.method == "DELETE":
+ try:
+ token = UserScheduleExportAuthToken.objects.get(user=user)
+ token.delete()
+ except UserScheduleExportAuthToken.DoesNotExist:
+ raise NotFound
+
+ return Response(status=status.HTTP_204_NO_CONTENT)
+
+ @action(detail=True, methods=["get", "post", "delete"])
+ def mobile_app_verification_token(self, request, pk):
+ DynamicSetting = apps.get_model("base", "DynamicSetting")
+
+ if not settings.MOBILE_APP_PUSH_NOTIFICATIONS_ENABLED:
+ return Response(status=status.HTTP_404_NOT_FOUND)
+
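+        # Mobile app access is gated per organization via the "mobile_app_settings" dynamic setting.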
+ mobile_app_settings = DynamicSetting.objects.get_or_create(
+ name="mobile_app_settings",
+ defaults={
+ "json_value": {
+ "org_ids": [],
+ }
+ },
+ )[0]
+ if self.request.auth.organization.pk not in mobile_app_settings.json_value["org_ids"]:
+ return Response(status=status.HTTP_404_NOT_FOUND)
+
+ user = self.get_object()
+
+ if self.request.method == "GET":
+ try:
+ token = MobileAppVerificationToken.objects.get(user=user)
+ except MobileAppVerificationToken.DoesNotExist:
+ raise NotFound
+
+ response = {
+ "token_id": token.id,
+ "user_id": token.user_id,
+ "organization_id": token.organization_id,
+ "created_at": token.created_at,
+ "revoked_at": token.revoked_at,
+ }
+ return Response(response, status=status.HTTP_200_OK)
+
+ if self.request.method == "POST":
+            # If a token already exists, revoke it first
+ try:
+ token = MobileAppVerificationToken.objects.get(user=user)
+ token.delete()
+ except MobileAppVerificationToken.DoesNotExist:
+ pass
+
+ instance, token = MobileAppVerificationToken.create_auth_token(user, user.organization)
+ data = {"id": instance.pk, "token": token, "created_at": instance.created_at}
+ return Response(data, status=status.HTTP_201_CREATED)
+
+ if self.request.method == "DELETE":
+ try:
+ token = MobileAppVerificationToken.objects.get(user=user)
+ token.delete()
+ except MobileAppVerificationToken.DoesNotExist:
+ raise NotFound
+
+ return Response(status=status.HTTP_204_NO_CONTENT)
+
+ @action(
+ methods=["get", "post", "delete"],
+ detail=False,
+ authentication_classes=(MobileAppVerificationTokenAuthentication,),
+ )
+ def mobile_app_auth_token(self, request):
+ DynamicSetting = apps.get_model("base", "DynamicSetting")
+
+ if not settings.MOBILE_APP_PUSH_NOTIFICATIONS_ENABLED:
+ return Response(status=status.HTTP_404_NOT_FOUND)
+
+ mobile_app_settings = DynamicSetting.objects.get_or_create(
+ name="mobile_app_settings",
+ defaults={
+ "json_value": {
+ "org_ids": [],
+ }
+ },
+ )[0]
+ if self.request.auth.organization.pk not in mobile_app_settings.json_value["org_ids"]:
+ return Response(status=status.HTTP_404_NOT_FOUND)
+
+ if self.request.method == "GET":
+ try:
+ token = MobileAppAuthToken.objects.get(user=self.request.user)
+ except MobileAppAuthToken.DoesNotExist:
+ raise NotFound
+
+ response = {
+ "token_id": token.id,
+ "user_id": token.user_id,
+ "organization_id": token.organization_id,
+ "created_at": token.created_at,
+ "revoked_at": token.revoked_at,
+ }
+ return Response(response, status=status.HTTP_200_OK)
+
+ if self.request.method == "POST":
+            # If a token already exists, revoke it
+ try:
+ token = MobileAppAuthToken.objects.get(user=self.request.user)
+ token.delete()
+ except MobileAppAuthToken.DoesNotExist:
+ pass
+
+ instance, token = MobileAppAuthToken.create_auth_token(self.request.user, self.request.user.organization)
+ data = {"id": instance.pk, "token": token, "created_at": instance.created_at}
+ return Response(data, status=status.HTTP_201_CREATED)
+
+ if self.request.method == "DELETE":
+ try:
+ token = MobileAppAuthToken.objects.get(user=self.request.user)
+ token.delete()
+            except MobileAppAuthToken.DoesNotExist:
+ raise NotFound
+
+ return Response(status=status.HTTP_204_NO_CONTENT)
diff --git a/engine/apps/api/views/user_group.py b/engine/apps/api/views/user_group.py
new file mode 100644
index 0000000000..4f230bae1a
--- /dev/null
+++ b/engine/apps/api/views/user_group.py
@@ -0,0 +1,23 @@
+from rest_framework import mixins, viewsets
+from rest_framework.filters import SearchFilter
+from rest_framework.permissions import IsAuthenticated
+
+from apps.api.serializers.user_group import UserGroupSerializer
+from apps.auth_token.auth import PluginAuthentication
+from apps.slack.models import SlackUserGroup
+
+
+class UserGroupViewSet(mixins.ListModelMixin, viewsets.GenericViewSet):
+ authentication_classes = (PluginAuthentication,)
+ permission_classes = (IsAuthenticated,)
+ serializer_class = UserGroupSerializer
+
+ filter_backends = (SearchFilter,)
+ search_fields = ("name", "handle")
+
+ def get_queryset(self):
+ slack_team_identity = self.request.auth.organization.slack_team_identity
+ if slack_team_identity is None:
+ return SlackUserGroup.objects.none()
+
+ return slack_team_identity.usergroups.all()
diff --git a/engine/apps/api/views/user_notification_policy.py b/engine/apps/api/views/user_notification_policy.py
new file mode 100644
index 0000000000..03ea04dd1e
--- /dev/null
+++ b/engine/apps/api/views/user_notification_policy.py
@@ -0,0 +1,206 @@
+from django.apps import apps
+from django.conf import settings
+from django.http import Http404
+from rest_framework import status
+from rest_framework.decorators import action
+from rest_framework.permissions import IsAuthenticated
+from rest_framework.response import Response
+from rest_framework.viewsets import ModelViewSet
+
+from apps.api.permissions import (
+ MODIFY_ACTIONS,
+ READ_ACTIONS,
+ ActionPermission,
+ AnyRole,
+ IsAdminOrEditor,
+ IsOwnerOrAdmin,
+)
+from apps.api.serializers.user_notification_policy import (
+ UserNotificationPolicySerializer,
+ UserNotificationPolicyUpdateSerializer,
+)
+from apps.auth_token.auth import PluginAuthentication
+from apps.base.messaging import get_messaging_backend_from_id
+from apps.base.models import UserNotificationPolicy
+from apps.base.models.user_notification_policy import BUILT_IN_BACKENDS, NotificationChannelAPIOptions
+from apps.user_management.models import User
+from apps.user_management.organization_log_creator import OrganizationLogType, create_organization_log
+from common.api_helpers.exceptions import BadRequest
+from common.api_helpers.mixins import UpdateSerializerMixin
+
+
+class UserNotificationPolicyView(UpdateSerializerMixin, ModelViewSet):
+ authentication_classes = (PluginAuthentication,)
+ permission_classes = (IsAuthenticated, ActionPermission)
+
+ action_permissions = {
+ IsAdminOrEditor: (*MODIFY_ACTIONS, "move_to_position"),
+ AnyRole: (*READ_ACTIONS, "delay_options", "notify_by_options"),
+ }
+ action_object_permissions = {
+ IsOwnerOrAdmin: (*MODIFY_ACTIONS, "move_to_position"),
+ AnyRole: READ_ACTIONS,
+ }
+
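+    # ownership_field presumably tells the IsOwnerOrAdmin permission which
+    # attribute links a policy to its owner, so editors can modify only their
+    # own policies.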
+ ownership_field = "user"
+
+ model = UserNotificationPolicy
+ serializer_class = UserNotificationPolicySerializer
+ update_serializer_class = UserNotificationPolicyUpdateSerializer
+
+ def get_queryset(self):
+ important = self.request.query_params.get("important", None) == "true"
+        user_id = self.request.query_params.get("user", None)
+ if user_id is None or user_id == self.request.user.public_primary_key:
+ queryset = self.model.objects.get_or_create_for_user(user=self.request.user, important=important)
+ else:
+ try:
+ target_user = User.objects.get(public_primary_key=user_id)
+ except User.DoesNotExist:
+ raise BadRequest(detail="User does not exist")
+
+ queryset = self.model.objects.get_or_create_for_user(user=target_user, important=important)
+
+ queryset = self.serializer_class.setup_eager_loading(queryset)
+
+ return queryset.order_by("order")
+
+ def get_object(self):
+        # We need to override get_object because the default implementation
+        # calls get_queryset first and raises 404 when accessing another
+        # user's policies.
+ pk = self.kwargs["pk"]
+ organization = self.request.auth.organization
+
+ try:
+ obj = UserNotificationPolicy.objects.get(public_primary_key=pk, user__organization=organization)
+ except UserNotificationPolicy.DoesNotExist:
+ raise Http404
+
+ self.check_object_permissions(self.request, obj)
+ return obj
+
+ def perform_create(self, serializer):
+ organization = self.request.auth.organization
+ user = serializer.validated_data.get("user") or self.request.user
+ old_state = user.repr_settings_for_client_side_logging
+ serializer.save()
+ new_state = user.repr_settings_for_client_side_logging
+        description = f"User settings for user {user.username} were changed from:\n{old_state}\nto:\n{new_state}"
+ create_organization_log(
+ organization,
+ self.request.user,
+ OrganizationLogType.TYPE_USER_SETTINGS_CHANGED,
+ description,
+ )
+
+ def perform_update(self, serializer):
+ organization = self.request.auth.organization
+ user = serializer.validated_data.get("user") or self.request.user
+ old_state = user.repr_settings_for_client_side_logging
+ serializer.save()
+ new_state = user.repr_settings_for_client_side_logging
+        description = f"User settings for user {user.username} were changed from:\n{old_state}\nto:\n{new_state}"
+ create_organization_log(
+ organization,
+ self.request.user,
+ OrganizationLogType.TYPE_USER_SETTINGS_CHANGED,
+ description,
+ )
+
+ def perform_destroy(self, instance):
+ organization = self.request.auth.organization
+ user = instance.user
+ old_state = user.repr_settings_for_client_side_logging
+ instance.delete()
+ new_state = user.repr_settings_for_client_side_logging
+        description = f"User settings for user {user.username} were changed from:\n{old_state}\nto:\n{new_state}"
+ create_organization_log(
+ organization,
+ self.request.user,
+ OrganizationLogType.TYPE_USER_SETTINGS_CHANGED,
+ description,
+ )
+
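+    # Sketch of the call this action serves (illustrative; the URL prefix
+    # depends on the router configuration):
+    #     PUT .../notification_policies/<pk>/move_to_position/?position=0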
+ @action(detail=True, methods=["put"])
+ def move_to_position(self, request, pk):
+ position = request.query_params.get("position", None)
+ if position is not None:
+ step = self.get_object()
+ try:
+ step.to(int(position))
+ return Response(status=status.HTTP_200_OK)
+ except ValueError as e:
+ raise BadRequest(detail=f"{e}")
+ else:
+ raise BadRequest(detail="Position was not provided")
+
+ @action(detail=False, methods=["get"])
+ def delay_options(self, request):
+ choices = []
+ for item in UserNotificationPolicy.DURATION_CHOICES:
+ choices.append({"value": str(item[0]), "sec_value": item[0], "display_name": item[1]})
+ return Response(choices)
+
+ @action(detail=False, methods=["get"])
+ def notify_by_options(self, request):
+ """
+        Returns a list of options for user notification policies, dropping options that require disabled features.
+ """
+ DynamicSetting = apps.get_model("base", "DynamicSetting")
+ choices = []
+ for notification_channel in NotificationChannelAPIOptions.AVAILABLE_FOR_USE:
+ slack_integration_required = (
+ notification_channel in NotificationChannelAPIOptions.SLACK_INTEGRATION_REQUIRED_NOTIFICATION_CHANNELS
+ )
+ telegram_integration_required = (
+ notification_channel
+ in NotificationChannelAPIOptions.TELEGRAM_INTEGRATION_REQUIRED_NOTIFICATION_CHANNELS
+ )
+ email_integration_required = (
+ notification_channel in NotificationChannelAPIOptions.EMAIL_INTEGRATION_REQUIRED_NOTIFICATION_CHANNELS
+ )
+ mobile_app_integration_required = (
+ notification_channel
+ in NotificationChannelAPIOptions.MOBILE_APP_INTEGRATION_REQUIRED_NOTIFICATION_CHANNELS
+ )
+ if slack_integration_required and not settings.FEATURE_SLACK_INTEGRATION_ENABLED:
+ continue
+ if telegram_integration_required and not settings.FEATURE_TELEGRAM_INTEGRATION_ENABLED:
+ continue
+ if email_integration_required and not settings.FEATURE_EMAIL_INTEGRATION_ENABLED:
+ continue
+ if mobile_app_integration_required and not settings.MOBILE_APP_PUSH_NOTIFICATIONS_ENABLED:
+ continue
+
+ # extra backends may be enabled per organization
+ if notification_channel.name not in BUILT_IN_BACKENDS:
+ extra_messaging_backend = get_messaging_backend_from_id(notification_channel.name)
+ if extra_messaging_backend is None:
+ continue
+
+ mobile_app_settings = DynamicSetting.objects.get_or_create(
+ name="mobile_app_settings",
+ defaults={
+ "json_value": {
+ "org_ids": [],
+ }
+ },
+ )[0]
+ if (
+ mobile_app_integration_required
+ and settings.MOBILE_APP_PUSH_NOTIFICATIONS_ENABLED
+ and self.request.auth.organization.pk not in mobile_app_settings.json_value["org_ids"]
+ ):
+ continue
+ choices.append(
+ {
+ "value": notification_channel,
+ "display_name": NotificationChannelAPIOptions.LABELS[notification_channel],
+ "slack_integration_required": slack_integration_required,
+ "telegram_integration_required": telegram_integration_required,
+ }
+ )
+ return Response(choices)
diff --git a/engine/apps/api_for_grafana_incident/__init__.py b/engine/apps/api_for_grafana_incident/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/engine/apps/api_for_grafana_incident/apps.py b/engine/apps/api_for_grafana_incident/apps.py
new file mode 100644
index 0000000000..e00766abd5
--- /dev/null
+++ b/engine/apps/api_for_grafana_incident/apps.py
@@ -0,0 +1,6 @@
+from django.apps import AppConfig
+
+
+class ApiForGrafanaIncidentConfig(AppConfig):
+ default_auto_field = "django.db.models.BigAutoField"
+ name = "apps.api_for_grafana_incident"
diff --git a/engine/apps/api_for_grafana_incident/serializers.py b/engine/apps/api_for_grafana_incident/serializers.py
new file mode 100644
index 0000000000..c6668933a8
--- /dev/null
+++ b/engine/apps/api_for_grafana_incident/serializers.py
@@ -0,0 +1,26 @@
+import logging
+
+from rest_framework import serializers
+
+from apps.alerts.models import AlertGroup
+from common.api_helpers.mixins import EagerLoadingMixin
+
+logger = logging.getLogger(__name__)
+
+
+class AlertGroupSerializer(EagerLoadingMixin, serializers.ModelSerializer):
+
+ id_oncall = serializers.CharField(read_only=True, source="public_primary_key")
+    status = serializers.SerializerMethodField()
+ link = serializers.CharField(read_only=True, source="web_link")
+
+ def get_status(self, obj):
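+        # Map the numeric status onto the lowercase display label from
+        # AlertGroup.STATUS_CHOICES.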
+ return next(filter(lambda status: status[0] == obj.status, AlertGroup.STATUS_CHOICES))[1].lower()
+
+ class Meta:
+ model = AlertGroup
+ fields = [
+ "id_oncall",
+ "link",
+ "status",
+ ]
diff --git a/engine/apps/api_for_grafana_incident/urls.py b/engine/apps/api_for_grafana_incident/urls.py
new file mode 100644
index 0000000000..7da3e192ac
--- /dev/null
+++ b/engine/apps/api_for_grafana_incident/urls.py
@@ -0,0 +1,17 @@
+from django.urls import include, path
+
+from common.api_helpers.optional_slash_router import OptionalSlashRouter
+
+from . import views
+
+app_name = "api_for_grafana_incident"
+
+
+router = OptionalSlashRouter()
+
+router.register(r"alert-groups", views.AlertGroupsView, basename="alert-groups")
+
+
+urlpatterns = [
+ path("", include(router.urls)),
+]
diff --git a/engine/apps/api_for_grafana_incident/views.py b/engine/apps/api_for_grafana_incident/views.py
new file mode 100644
index 0000000000..182100a44a
--- /dev/null
+++ b/engine/apps/api_for_grafana_incident/views.py
@@ -0,0 +1,13 @@
+from rest_framework.viewsets import ReadOnlyModelViewSet
+
+from apps.alerts.models import AlertGroup
+from apps.auth_token.auth import GrafanaIncidentStaticKeyAuth
+
+from .serializers import AlertGroupSerializer
+
+
+class AlertGroupsView(ReadOnlyModelViewSet):
+ authentication_classes = (GrafanaIncidentStaticKeyAuth,)
+ queryset = AlertGroup.unarchived_objects.all()
+ serializer_class = AlertGroupSerializer
+ lookup_field = "public_primary_key"
diff --git a/engine/apps/auth_token/__init__.py b/engine/apps/auth_token/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/engine/apps/auth_token/auth.py b/engine/apps/auth_token/auth.py
new file mode 100644
index 0000000000..aa1a625131
--- /dev/null
+++ b/engine/apps/auth_token/auth.py
@@ -0,0 +1,246 @@
+import json
+import logging
+from typing import Tuple
+
+from django.conf import settings
+from django.contrib.auth.models import AnonymousUser
+from rest_framework import exceptions
+from rest_framework.authentication import BaseAuthentication, get_authorization_header
+from rest_framework.request import Request
+
+from apps.grafana_plugin.helpers.gcom import check_token
+from apps.public_api import constants as public_api_constants
+from apps.user_management.models import User
+from apps.user_management.models.organization import Organization
+from common.constants.role import Role
+
+from .constants import SCHEDULE_EXPORT_TOKEN_NAME, SLACK_AUTH_TOKEN_NAME
+from .exceptions import InvalidToken
+from .models import ApiAuthToken, PluginAuthToken, ScheduleExportAuthToken, SlackAuthToken, UserScheduleExportAuthToken
+from .models.mobile_app_auth_token import MobileAppAuthToken
+from .models.mobile_app_verification_token import MobileAppVerificationToken
+
+logger = logging.getLogger(__name__)
+logger.setLevel(logging.DEBUG)
+
+
+class ApiTokenAuthentication(BaseAuthentication):
+ model = ApiAuthToken
+
+ def authenticate(self, request):
+ auth = get_authorization_header(request).decode("utf-8")
+
+ if auth == public_api_constants.DEMO_AUTH_TOKEN:
+ user = User.objects.get(public_primary_key=public_api_constants.DEMO_USER_ID)
+ auth_token = user.auth_tokens.first()
+ return user, auth_token
+
+ user, auth_token = self.authenticate_credentials(auth)
+
+ if user.role != Role.ADMIN:
+ raise exceptions.AuthenticationFailed(
+ "Only users with Admin permissions are allowed to perform this action."
+ )
+
+ return user, auth_token
+
+ def authenticate_credentials(self, token):
+        """
+        Tokens are stored only as digests, so candidates are looked up by the
+        token_key prefix and their digests compared in constant time.
+        """
+ try:
+ auth_token = self.model.validate_token_string(token)
+ except InvalidToken:
+ raise exceptions.AuthenticationFailed("Invalid token.")
+ return auth_token.user, auth_token
+
+
+class PluginAuthentication(BaseAuthentication):
+ def authenticate_header(self, request):
+        # See the comments on the parent class method
+ return "Bearer"
+
+ def authenticate(self, request: Request) -> Tuple[User, PluginAuthToken]:
+ token_string = get_authorization_header(request).decode()
+
+ if not token_string:
+ raise exceptions.AuthenticationFailed("No token provided")
+
+ return self.authenticate_credentials(token_string, request)
+
+ def authenticate_credentials(self, token_string: str, request: Request) -> Tuple[User, PluginAuthToken]:
+ context_string = request.headers.get("X-Instance-Context")
+ if not context_string:
+ raise exceptions.AuthenticationFailed("No instance context provided.")
+
+ context = json.loads(context_string)
+ try:
+ auth_token = check_token(token_string, context=context)
+ if not auth_token.organization:
+ raise exceptions.AuthenticationFailed("No organization associated with token.")
+ except InvalidToken:
+ raise exceptions.AuthenticationFailed("Invalid token.")
+
+ user = self._get_user(request, auth_token.organization)
+ return user, auth_token
+
+ @staticmethod
+ def _get_user(request: Request, organization: Organization) -> User:
+        context_string = request.headers.get("X-Grafana-Context")
+        if not context_string:
+            raise exceptions.AuthenticationFailed("No user context provided.")
+        context = json.loads(context_string)
+ user_id = context["UserId"]
+ try:
+ return organization.users.get(user_id=user_id)
+ except User.DoesNotExist:
+ logger.debug(f"Could not get user from grafana request. Context {context}")
+ raise exceptions.AuthenticationFailed("Non-existent or anonymous user.")
+
+
+class GrafanaIncidentUser(AnonymousUser):
+ @property
+ def is_authenticated(self):
+ # Always return True. This is a way to tell if
+ # the user has been authenticated in permissions
+ return True
+
+
+class GrafanaIncidentStaticKeyAuth(BaseAuthentication):
+ def authenticate_header(self, request): # noqa
+        # See the comments on the parent class method
+ return "Bearer"
+
+ def authenticate(self, request: Request) -> Tuple[GrafanaIncidentUser, None]:
+ token_string = get_authorization_header(request).decode()
+
+        if not token_string:
+            raise exceptions.AuthenticationFailed("No token provided")
+
+        if (
+            settings.GRAFANA_INCIDENT_STATIC_API_KEY is None
+            or token_string != settings.GRAFANA_INCIDENT_STATIC_API_KEY
+        ):
+            raise exceptions.AuthenticationFailed("Wrong token")
+
+ return self.authenticate_credentials(token_string, request)
+
+ def authenticate_credentials(self, token_string: str, request: Request) -> Tuple[GrafanaIncidentUser, None]:
+        # The static key was already validated in authenticate(); constructing
+        # the stub user cannot fail.
+        return GrafanaIncidentUser(), None
+
+
+class SlackTokenAuthentication(BaseAuthentication):
+ model = SlackAuthToken
+
+ def authenticate(self, request) -> Tuple[User, SlackAuthToken]:
+ auth = request.query_params.get(SLACK_AUTH_TOKEN_NAME)
+ if not auth:
+ raise exceptions.AuthenticationFailed("Invalid token.")
+ user, auth_token = self.authenticate_credentials(auth)
+ return user, auth_token
+
+ def authenticate_credentials(self, token_string: str) -> Tuple[User, SlackAuthToken]:
+ try:
+ auth_token = self.model.validate_token_string(token_string)
+ except InvalidToken:
+ raise exceptions.AuthenticationFailed("Invalid token.")
+
+ return auth_token.user, auth_token
+
+
+class ScheduleExportAuthentication(BaseAuthentication):
+ model = ScheduleExportAuthToken
+
+ def authenticate(self, request) -> Tuple[User, ScheduleExportAuthToken]:
+ auth = request.query_params.get(SCHEDULE_EXPORT_TOKEN_NAME)
+ public_primary_key = request.parser_context.get("kwargs", {}).get("pk")
+ if not auth:
+ raise exceptions.AuthenticationFailed("Invalid token.")
+
+ auth_token = self.authenticate_credentials(auth, public_primary_key)
+ return auth_token
+
+ def authenticate_credentials(
+ self, token_string: str, public_primary_key: str
+ ) -> Tuple[User, ScheduleExportAuthToken]:
+ try:
+ auth_token = self.model.validate_token_string(token_string)
+ except InvalidToken:
+ raise exceptions.AuthenticationFailed("Invalid token.")
+
+ if auth_token.schedule.public_primary_key != public_primary_key:
+ raise exceptions.AuthenticationFailed("Invalid schedule export token for schedule")
+
+ if not auth_token.active:
+ raise exceptions.AuthenticationFailed("Export token is deactivated")
+
+ return auth_token.user, auth_token
+
+
+class UserScheduleExportAuthentication(BaseAuthentication):
+ model = UserScheduleExportAuthToken
+
+ def authenticate(self, request) -> Tuple[User, UserScheduleExportAuthToken]:
+ auth = request.query_params.get(SCHEDULE_EXPORT_TOKEN_NAME)
+ public_primary_key = request.parser_context.get("kwargs", {}).get("pk")
+
+ if not auth:
+ raise exceptions.AuthenticationFailed("Invalid token.")
+
+ auth_token = self.authenticate_credentials(auth, public_primary_key)
+ return auth_token
+
+ def authenticate_credentials(
+ self, token_string: str, public_primary_key: str
+ ) -> Tuple[User, UserScheduleExportAuthToken]:
+ try:
+ auth_token = self.model.validate_token_string(token_string)
+ except InvalidToken:
+ raise exceptions.AuthenticationFailed("Invalid token")
+
+ if auth_token.user.public_primary_key != public_primary_key:
+ raise exceptions.AuthenticationFailed("Invalid schedule export token for user")
+
+ if not auth_token.active:
+ raise exceptions.AuthenticationFailed("Export token is deactivated")
+
+ return auth_token.user, auth_token
+
+
+class MobileAppVerificationTokenAuthentication(BaseAuthentication):
+ model = MobileAppVerificationToken
+
+ def authenticate(self, request) -> Tuple[User, MobileAppVerificationToken]:
+ auth = get_authorization_header(request).decode("utf-8")
+ user, auth_token = self.authenticate_credentials(auth)
+ return user, auth_token
+
+ def authenticate_credentials(self, token_string: str) -> Tuple[User, MobileAppVerificationToken]:
+ try:
+ auth_token = self.model.validate_token_string(token_string)
+ except InvalidToken:
+ raise exceptions.AuthenticationFailed("Invalid token")
+
+ return auth_token.user, auth_token
+
+
+class MobileAppAuthTokenAuthentication(BaseAuthentication):
+ model = MobileAppAuthToken
+
+ def authenticate(self, request) -> Tuple[User, MobileAppAuthToken]:
+ auth = get_authorization_header(request).decode("utf-8")
+ user, auth_token = self.authenticate_credentials(auth)
+ if user is None:
+ return None
+ return user, auth_token
+
+ def authenticate_credentials(self, token_string: str) -> Tuple[User, MobileAppAuthToken]:
+ try:
+ auth_token = self.model.validate_token_string(token_string)
+ except InvalidToken:
+ return None, None
+
+ return auth_token.user, auth_token
diff --git a/engine/apps/auth_token/constants.py b/engine/apps/auth_token/constants.py
new file mode 100644
index 0000000000..676b6c8881
--- /dev/null
+++ b/engine/apps/auth_token/constants.py
@@ -0,0 +1,12 @@
+AUTH_TOKEN_CHARACTER_LENGTH = 64
+AUTH_SHORT_TOKEN_CHARACTER_LENGTH = 6
+TOKEN_KEY_LENGTH = 8
+DIGEST_LENGTH = 128
+MAX_PUBLIC_API_TOKENS_PER_USER = 5
+
+SLACK_AUTH_TOKEN_NAME = "slack_login_token"
+
+SCHEDULE_EXPORT_TOKEN_NAME = "token"
+SCHEDULE_EXPORT_TOKEN_CHARACTER_LENGTH = 32
+
+MOBILE_APP_AUTH_VERIFICATION_TOKEN_TIMEOUT_SECONDS = 60
diff --git a/engine/apps/auth_token/crypto.py b/engine/apps/auth_token/crypto.py
new file mode 100644
index 0000000000..11616216e8
--- /dev/null
+++ b/engine/apps/auth_token/crypto.py
@@ -0,0 +1,45 @@
+import binascii
+from os import urandom as generate_bytes
+from typing import Tuple
+
+from cryptography.hazmat.backends import default_backend
+from cryptography.hazmat.primitives import hashes
+from cryptography.hazmat.primitives.hashes import SHA512
+
+from apps.auth_token import constants
+
+sha = SHA512
+
+
+def generate_token_string() -> str:
+ num_bytes = int(constants.AUTH_TOKEN_CHARACTER_LENGTH / 2)
+ return binascii.hexlify(generate_bytes(num_bytes)).decode()
+
+
+def generate_short_token_string() -> str:
+ num_bytes = int(constants.AUTH_SHORT_TOKEN_CHARACTER_LENGTH / 2)
+ return binascii.hexlify(generate_bytes(num_bytes)).decode()
+
+
+def hash_token_string(token_string: str) -> str:
+ digest = hashes.Hash(sha(), backend=default_backend())
+ digest.update(binascii.unhexlify(token_string))
+ return binascii.hexlify(digest.finalize()).decode()
+
+
+def generate_plugin_token_string_and_salt(stack_id: int, org_id: int) -> Tuple[str, str]:
+ random_salt = generate_bytes(int(constants.AUTH_TOKEN_CHARACTER_LENGTH / 2))
+ return generate_plugin_token_string(random_salt, stack_id, org_id), binascii.hexlify(random_salt).decode()
+
+
+def generate_plugin_token_string(salt: bytes, stack_id: int, org_id: int) -> str:
+ digest = hashes.Hash(sha(), backend=default_backend())
+ digest.update(salt)
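+    # Note: bytes(n) yields n zero bytes, so stack_id and org_id feed the
+    # digest only through their lengths. Validation recreates the token the
+    # same way, so generation and verification stay consistent.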
+ digest.update(bytes(stack_id))
+ digest.update(bytes(org_id))
+ return binascii.hexlify(digest.finalize()).decode()
+
+
+def generate_schedule_token_string() -> str:
+ num_bytes = int(constants.SCHEDULE_EXPORT_TOKEN_CHARACTER_LENGTH / 2)
+ return binascii.hexlify(generate_bytes(num_bytes)).decode()
diff --git a/engine/apps/auth_token/exceptions.py b/engine/apps/auth_token/exceptions.py
new file mode 100644
index 0000000000..0ea79b0cc2
--- /dev/null
+++ b/engine/apps/auth_token/exceptions.py
@@ -0,0 +1,2 @@
+class InvalidToken(Exception):
+ pass
diff --git a/engine/apps/auth_token/migrations/0001_squashed_initial.py b/engine/apps/auth_token/migrations/0001_squashed_initial.py
new file mode 100644
index 0000000000..c8cb68542d
--- /dev/null
+++ b/engine/apps/auth_token/migrations/0001_squashed_initial.py
@@ -0,0 +1,107 @@
+# Generated by Django 3.2.5 on 2022-05-31 14:46
+
+import apps.auth_token.models.mobile_app_verification_token
+import apps.auth_token.models.slack_auth_token
+from django.db import migrations, models
+
+
+class Migration(migrations.Migration):
+
+ initial = True
+
+ dependencies = [
+ ]
+
+ operations = [
+ migrations.CreateModel(
+ name='ApiAuthToken',
+ fields=[
+ ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
+ ('token_key', models.CharField(db_index=True, max_length=8)),
+ ('digest', models.CharField(max_length=128)),
+ ('created_at', models.DateTimeField(auto_now_add=True)),
+ ('revoked_at', models.DateTimeField(null=True)),
+ ('name', models.CharField(max_length=50)),
+ ],
+ options={
+ 'abstract': False,
+ },
+ ),
+ migrations.CreateModel(
+ name='MobileAppAuthToken',
+ fields=[
+ ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
+ ('token_key', models.CharField(db_index=True, max_length=8)),
+ ('digest', models.CharField(max_length=128)),
+ ('created_at', models.DateTimeField(auto_now_add=True)),
+ ('revoked_at', models.DateTimeField(null=True)),
+ ],
+ options={
+ 'abstract': False,
+ },
+ ),
+ migrations.CreateModel(
+ name='MobileAppVerificationToken',
+ fields=[
+ ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
+ ('token_key', models.CharField(db_index=True, max_length=8)),
+ ('digest', models.CharField(max_length=128)),
+ ('created_at', models.DateTimeField(auto_now_add=True)),
+ ('revoked_at', models.DateTimeField(null=True)),
+ ('expire_date', models.DateTimeField(default=apps.auth_token.models.mobile_app_verification_token.get_expire_date)),
+ ],
+ options={
+ 'abstract': False,
+ },
+ ),
+ migrations.CreateModel(
+ name='PluginAuthToken',
+ fields=[
+ ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
+ ('token_key', models.CharField(db_index=True, max_length=8)),
+ ('digest', models.CharField(max_length=128)),
+ ('created_at', models.DateTimeField(auto_now_add=True)),
+ ('revoked_at', models.DateTimeField(null=True)),
+ ('salt', models.CharField(max_length=64, null=True)),
+ ],
+ options={
+ 'abstract': False,
+ },
+ ),
+ migrations.CreateModel(
+ name='ScheduleExportAuthToken',
+ fields=[
+ ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
+ ('token_key', models.CharField(db_index=True, max_length=8)),
+ ('digest', models.CharField(max_length=128)),
+ ('created_at', models.DateTimeField(auto_now_add=True)),
+ ('revoked_at', models.DateTimeField(null=True)),
+ ('active', models.BooleanField(default=True)),
+ ],
+ ),
+ migrations.CreateModel(
+ name='SlackAuthToken',
+ fields=[
+ ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
+ ('token_key', models.CharField(db_index=True, max_length=8)),
+ ('digest', models.CharField(max_length=128)),
+ ('created_at', models.DateTimeField(auto_now_add=True)),
+ ('revoked_at', models.DateTimeField(null=True)),
+ ('expire_date', models.DateTimeField(default=apps.auth_token.models.slack_auth_token.get_expire_date)),
+ ],
+ options={
+ 'abstract': False,
+ },
+ ),
+ migrations.CreateModel(
+ name='UserScheduleExportAuthToken',
+ fields=[
+ ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
+ ('token_key', models.CharField(db_index=True, max_length=8)),
+ ('digest', models.CharField(max_length=128)),
+ ('created_at', models.DateTimeField(auto_now_add=True)),
+ ('revoked_at', models.DateTimeField(null=True)),
+ ('active', models.BooleanField(default=True)),
+ ],
+ ),
+ ]
diff --git a/engine/apps/auth_token/migrations/0002_squashed_initial.py b/engine/apps/auth_token/migrations/0002_squashed_initial.py
new file mode 100644
index 0000000000..e35e5ce214
--- /dev/null
+++ b/engine/apps/auth_token/migrations/0002_squashed_initial.py
@@ -0,0 +1,96 @@
+# Generated by Django 3.2.5 on 2022-05-31 14:46
+
+from django.db import migrations, models
+import django.db.models.deletion
+
+
+class Migration(migrations.Migration):
+
+ initial = True
+
+ dependencies = [
+ ('user_management', '0001_squashed_initial'),
+ ('schedules', '0001_squashed_initial'),
+ ('auth_token', '0001_squashed_initial'),
+ ]
+
+ operations = [
+ migrations.AddField(
+ model_name='userscheduleexportauthtoken',
+ name='organization',
+ field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='user_schedule_export_token', to='user_management.organization'),
+ ),
+ migrations.AddField(
+ model_name='userscheduleexportauthtoken',
+ name='user',
+ field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='user_schedule_export_token', to='user_management.user'),
+ ),
+ migrations.AddField(
+ model_name='slackauthtoken',
+ name='organization',
+ field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='slack_auth_token_set', to='user_management.organization'),
+ ),
+ migrations.AddField(
+ model_name='slackauthtoken',
+ name='user',
+ field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='slack_auth_token_set', to='user_management.user'),
+ ),
+ migrations.AddField(
+ model_name='scheduleexportauthtoken',
+ name='organization',
+ field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='schedule_export_token', to='user_management.organization'),
+ ),
+ migrations.AddField(
+ model_name='scheduleexportauthtoken',
+ name='schedule',
+ field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='schedule_export_token', to='schedules.oncallschedule'),
+ ),
+ migrations.AddField(
+ model_name='scheduleexportauthtoken',
+ name='user',
+ field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='schedule_export_token', to='user_management.user'),
+ ),
+ migrations.AddField(
+ model_name='pluginauthtoken',
+ name='organization',
+ field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='user_management.organization'),
+ ),
+ migrations.AddField(
+ model_name='mobileappverificationtoken',
+ name='organization',
+ field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='mobile_app_verification_token_set', to='user_management.organization'),
+ ),
+ migrations.AddField(
+ model_name='mobileappverificationtoken',
+ name='user',
+ field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='mobile_app_verification_token_set', to='user_management.user'),
+ ),
+ migrations.AddField(
+ model_name='mobileappauthtoken',
+ name='organization',
+ field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='mobile_app_auth_tokens', to='user_management.organization'),
+ ),
+ migrations.AddField(
+ model_name='mobileappauthtoken',
+ name='user',
+ field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='mobile_app_auth_tokens', to='user_management.user'),
+ ),
+ migrations.AddField(
+ model_name='apiauthtoken',
+ name='organization',
+ field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='auth_tokens', to='user_management.organization'),
+ ),
+ migrations.AddField(
+ model_name='apiauthtoken',
+ name='user',
+ field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='auth_tokens', to='user_management.user'),
+ ),
+ migrations.AlterUniqueTogether(
+ name='userscheduleexportauthtoken',
+ unique_together={('user', 'organization')},
+ ),
+ migrations.AlterUniqueTogether(
+ name='scheduleexportauthtoken',
+ unique_together={('user', 'organization', 'schedule')},
+ ),
+ ]
diff --git a/engine/apps/auth_token/migrations/0003_squashed_create_demo_token_instances.py b/engine/apps/auth_token/migrations/0003_squashed_create_demo_token_instances.py
new file mode 100644
index 0000000000..225e0fcb23
--- /dev/null
+++ b/engine/apps/auth_token/migrations/0003_squashed_create_demo_token_instances.py
@@ -0,0 +1,40 @@
+# Generated by Django 3.2.5 on 2021-08-04 13:02
+
+import sys
+from django.db import migrations
+
+from apps.auth_token import constants
+from apps.auth_token import crypto
+from apps.public_api import constants as public_api_constants
+
+
+def create_demo_token_instances(apps, schema_editor):
+ if not (len(sys.argv) > 1 and sys.argv[1] == 'test'):
+ User = apps.get_model('user_management', 'User')
+ Organization = apps.get_model('user_management', 'Organization')
+ ApiAuthToken = apps.get_model('auth_token', 'ApiAuthToken')
+
+ organization = Organization.objects.get(public_primary_key=public_api_constants.DEMO_ORGANIZATION_ID)
+ user = User.objects.get(public_primary_key=public_api_constants.DEMO_USER_ID)
+
+ token_string = crypto.generate_token_string()
+ digest = crypto.hash_token_string(token_string)
+
+ ApiAuthToken.objects.get_or_create(
+ name=public_api_constants.DEMO_AUTH_TOKEN,
+ user=user,
+ organization=organization,
+ defaults=dict(token_key=token_string[:constants.TOKEN_KEY_LENGTH], digest=digest)
+ )
+
+
+class Migration(migrations.Migration):
+
+ dependencies = [
+ ('auth_token', '0002_squashed_initial'),
+ ('user_management', '0002_squashed_create_demo_token_instances')
+ ]
+
+ operations = [
+ migrations.RunPython(create_demo_token_instances, migrations.RunPython.noop)
+ ]
diff --git a/engine/apps/auth_token/migrations/__init__.py b/engine/apps/auth_token/migrations/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/engine/apps/auth_token/models/__init__.py b/engine/apps/auth_token/models/__init__.py
new file mode 100644
index 0000000000..4327ad525a
--- /dev/null
+++ b/engine/apps/auth_token/models/__init__.py
@@ -0,0 +1,6 @@
+from .api_auth_token import ApiAuthToken # noqa: F401
+from .base_auth_token import BaseAuthToken # noqa: F401
+from .plugin_auth_token import PluginAuthToken # noqa: F401
+from .schedule_export_auth_token import ScheduleExportAuthToken # noqa: F401
+from .slack_auth_token import SlackAuthToken # noqa: F401
+from .user_schedule_export_auth_token import UserScheduleExportAuthToken # noqa: F401
diff --git a/engine/apps/auth_token/models/api_auth_token.py b/engine/apps/auth_token/models/api_auth_token.py
new file mode 100644
index 0000000000..3309a505ae
--- /dev/null
+++ b/engine/apps/auth_token/models/api_auth_token.py
@@ -0,0 +1,32 @@
+from typing import Tuple
+
+from django.db import models
+
+from apps.auth_token import constants, crypto
+from apps.auth_token.models.base_auth_token import BaseAuthToken
+from apps.user_management.models import Organization, User
+from apps.user_management.organization_log_creator import OrganizationLogType, create_organization_log
+
+
+class ApiAuthToken(BaseAuthToken):
+ user = models.ForeignKey(to=User, null=False, blank=False, related_name="auth_tokens", on_delete=models.CASCADE)
+ organization = models.ForeignKey(
+ to=Organization, null=False, blank=False, related_name="auth_tokens", on_delete=models.CASCADE
+ )
+ name = models.CharField(max_length=50)
+
+ @classmethod
+ def create_auth_token(cls, user: User, organization: Organization, name: str) -> Tuple["ApiAuthToken", str]:
+ token_string = crypto.generate_token_string()
+ digest = crypto.hash_token_string(token_string)
+
+ instance = cls.objects.create(
+ token_key=token_string[: constants.TOKEN_KEY_LENGTH],
+ digest=digest,
+ user=user,
+ organization=organization,
+ name=name,
+ )
+ description = f"API token {instance.name} was created"
+ create_organization_log(organization, user, OrganizationLogType.TYPE_API_TOKEN_CREATED, description)
+ return instance, token_string
diff --git a/engine/apps/auth_token/models/base_auth_token.py b/engine/apps/auth_token/models/base_auth_token.py
new file mode 100644
index 0000000000..5861cb64f1
--- /dev/null
+++ b/engine/apps/auth_token/models/base_auth_token.py
@@ -0,0 +1,44 @@
+import binascii
+from hmac import compare_digest
+from typing import Optional
+
+from django.db import models
+from django.utils import timezone
+
+from apps.auth_token import constants
+from apps.auth_token.crypto import hash_token_string
+from apps.auth_token.exceptions import InvalidToken
+
+
+class AuthTokenQueryset(models.QuerySet):
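+    # Soft revocation: filter() transparently hides revoked tokens and
+    # delete() only stamps revoked_at, so objects_with_deleted on the model
+    # remains the escape hatch for seeing everything.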
+ def filter(self, *args, **kwargs):
+ return super().filter(*args, **kwargs, revoked_at=None)
+
+ def delete(self):
+ self.update(revoked_at=timezone.now())
+
+
+class BaseAuthToken(models.Model):
+ class Meta:
+ abstract = True
+
+ objects = AuthTokenQueryset.as_manager()
+ objects_with_deleted = models.Manager()
+
+ token_key = models.CharField(max_length=constants.TOKEN_KEY_LENGTH, db_index=True)
+ digest = models.CharField(max_length=constants.DIGEST_LENGTH)
+
+ created_at = models.DateTimeField(auto_now_add=True)
+ revoked_at = models.DateTimeField(null=True)
+
+ @classmethod
+ def validate_token_string(cls, token: str, *args, **kwargs) -> Optional["BaseAuthToken"]:
+ for auth_token in cls.objects.filter(token_key=token[: constants.TOKEN_KEY_LENGTH]):
+ try:
+ digest = hash_token_string(token)
+ except (TypeError, binascii.Error):
+ raise InvalidToken
+ if compare_digest(digest, auth_token.digest):
+ return auth_token
+
+ raise InvalidToken
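+
+# Round-trip sketch (illustrative, assuming an existing user and organization):
+#
+#     instance, token_string = ApiAuthToken.create_auth_token(user, org, "ci")
+#     assert ApiAuthToken.validate_token_string(token_string) == instance
+#
+# Only the digest and an 8-character token_key prefix are persisted; the raw
+# token string is available to the caller just once, at creation time.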
diff --git a/engine/apps/auth_token/models/mobile_app_auth_token.py b/engine/apps/auth_token/models/mobile_app_auth_token.py
new file mode 100644
index 0000000000..333ed788ed
--- /dev/null
+++ b/engine/apps/auth_token/models/mobile_app_auth_token.py
@@ -0,0 +1,29 @@
+from typing import Tuple
+
+from django.db import models
+
+from apps.auth_token import constants, crypto
+from apps.auth_token.models.base_auth_token import BaseAuthToken
+from apps.user_management.models import Organization, User
+
+
+class MobileAppAuthToken(BaseAuthToken):
+ user = models.ForeignKey(
+ to=User, null=False, blank=False, related_name="mobile_app_auth_tokens", on_delete=models.CASCADE
+ )
+ organization = models.ForeignKey(
+ to=Organization, null=False, blank=False, related_name="mobile_app_auth_tokens", on_delete=models.CASCADE
+ )
+
+ @classmethod
+ def create_auth_token(cls, user: User, organization: Organization) -> Tuple["MobileAppAuthToken", str]:
+ token_string = crypto.generate_token_string()
+ digest = crypto.hash_token_string(token_string)
+
+ instance = cls.objects.create(
+ token_key=token_string[: constants.TOKEN_KEY_LENGTH],
+ digest=digest,
+ user=user,
+ organization=organization,
+ )
+ return instance, token_string
diff --git a/engine/apps/auth_token/models/mobile_app_verification_token.py b/engine/apps/auth_token/models/mobile_app_verification_token.py
new file mode 100644
index 0000000000..f67f8f3f3d
--- /dev/null
+++ b/engine/apps/auth_token/models/mobile_app_verification_token.py
@@ -0,0 +1,48 @@
+from typing import Tuple
+
+from django.db import models
+from django.utils import timezone
+
+from apps.auth_token import constants, crypto
+from apps.auth_token.constants import MOBILE_APP_AUTH_VERIFICATION_TOKEN_TIMEOUT_SECONDS
+from apps.auth_token.models import BaseAuthToken
+from apps.user_management.models import Organization, User
+
+
+def get_expire_date():
+ return timezone.now() + timezone.timedelta(seconds=MOBILE_APP_AUTH_VERIFICATION_TOKEN_TIMEOUT_SECONDS)
+
+
+class MobileAppVerificationTokenQueryset(models.QuerySet):
+ def filter(self, *args, **kwargs):
+ now = timezone.now()
+ return super().filter(*args, **kwargs, revoked_at=None, expire_date__gte=now)
+
+ def delete(self):
+ self.update(revoked_at=timezone.now())
+
+
+class MobileAppVerificationToken(BaseAuthToken):
+ objects = MobileAppVerificationTokenQueryset.as_manager()
+ user = models.ForeignKey(
+ "user_management.User",
+ related_name="mobile_app_verification_token_set",
+ on_delete=models.CASCADE,
+ )
+ organization = models.ForeignKey(
+ "user_management.Organization", related_name="mobile_app_verification_token_set", on_delete=models.CASCADE
+ )
+ expire_date = models.DateTimeField(default=get_expire_date)
+
+ @classmethod
+ def create_auth_token(cls, user: User, organization: Organization) -> Tuple["MobileAppVerificationToken", str]:
+ token_string = crypto.generate_short_token_string()
+ digest = crypto.hash_token_string(token_string)
+
+ instance = cls.objects.create(
+ token_key=token_string[: constants.TOKEN_KEY_LENGTH],
+ digest=digest,
+ user=user,
+ organization=organization,
+ )
+ return instance, token_string
diff --git a/engine/apps/auth_token/models/plugin_auth_token.py b/engine/apps/auth_token/models/plugin_auth_token.py
new file mode 100644
index 0000000000..cd33c25b98
--- /dev/null
+++ b/engine/apps/auth_token/models/plugin_auth_token.py
@@ -0,0 +1,53 @@
+import binascii
+from hmac import compare_digest
+from typing import Optional, Tuple
+
+from django.db import models
+
+from apps.auth_token import constants
+from apps.auth_token.crypto import (
+ generate_plugin_token_string,
+ generate_plugin_token_string_and_salt,
+ hash_token_string,
+)
+from apps.auth_token.exceptions import InvalidToken
+from apps.auth_token.models import BaseAuthToken
+from apps.user_management.models import Organization
+
+
+class PluginAuthToken(BaseAuthToken):
+ salt = models.CharField(max_length=constants.AUTH_TOKEN_CHARACTER_LENGTH, null=True)
+ organization = models.ForeignKey(to=Organization, on_delete=models.CASCADE)
+
+ @classmethod
+ def create_auth_token(cls, organization: Organization) -> Tuple["PluginAuthToken", str]:
+ old_token = cls.objects.filter(organization=organization)
+
+ if old_token.exists():
+ old_token.delete()
+
+ token_string, salt = generate_plugin_token_string_and_salt(organization.stack_id, organization.org_id)
+ digest = hash_token_string(token_string)
+
+ auth_token = cls.objects.create(
+ token_key=token_string[: constants.TOKEN_KEY_LENGTH],
+ digest=digest,
+ salt=salt,
+ organization=organization,
+ )
+ return auth_token, token_string
+
+ @classmethod
+ def validate_token_string(cls, token: str, *args, **kwargs) -> Optional["PluginAuthToken"]:
+ context = kwargs["context"]
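+        # Recreate the token from the stored salt and the caller-supplied
+        # stack/org ids; a match proves the caller presented both the right
+        # token and the right instance context.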
+ for auth_token in cls.objects.filter(token_key=token[: constants.TOKEN_KEY_LENGTH]):
+ try:
+ stack_id = int(context["stack_id"])
+ org_id = int(context["org_id"])
+ salt = binascii.unhexlify(auth_token.salt)
+ recreated_token = generate_plugin_token_string(salt, stack_id, org_id)
+ digest = hash_token_string(recreated_token)
+ except (TypeError, binascii.Error):
+ raise InvalidToken
+            if compare_digest(digest, auth_token.digest) and token == recreated_token:
+                return auth_token
+
+        raise InvalidToken
diff --git a/engine/apps/auth_token/models/schedule_export_auth_token.py b/engine/apps/auth_token/models/schedule_export_auth_token.py
new file mode 100644
index 0000000000..4df656f94c
--- /dev/null
+++ b/engine/apps/auth_token/models/schedule_export_auth_token.py
@@ -0,0 +1,45 @@
+from typing import Tuple
+
+from django.db import models
+
+from apps.auth_token import constants, crypto
+from apps.auth_token.models.base_auth_token import BaseAuthToken
+from apps.schedules.models import OnCallSchedule
+from apps.user_management.models import Organization, User
+from apps.user_management.organization_log_creator import OrganizationLogType, create_organization_log
+
+
+class ScheduleExportAuthToken(BaseAuthToken):
+ class Meta:
+ unique_together = ("user", "organization", "schedule")
+
+ user = models.ForeignKey(
+ to=User, null=False, blank=False, related_name="schedule_export_token", on_delete=models.CASCADE
+ )
+ organization = models.ForeignKey(
+ to=Organization, null=False, blank=False, related_name="schedule_export_token", on_delete=models.CASCADE
+ )
+ schedule = models.ForeignKey(
+ to=OnCallSchedule, null=True, blank=True, related_name="schedule_export_token", on_delete=models.CASCADE
+ )
+ active = models.BooleanField(default=True)
+
+ @classmethod
+ def create_auth_token(
+ cls, user: User, organization: Organization, schedule: OnCallSchedule = None
+ ) -> Tuple["ScheduleExportAuthToken", str]:
+ token_string = crypto.generate_schedule_token_string()
+ digest = crypto.hash_token_string(token_string)
+
+ instance = cls.objects.create(
+ token_key=token_string[: constants.TOKEN_KEY_LENGTH],
+ digest=digest,
+ user=user,
+ organization=organization,
+ schedule=schedule,
+ )
+        description = "Schedule export token was created by user {0} for schedule {1}".format(
+            user.username, schedule.name if schedule else "None"
+        )
+ create_organization_log(organization, user, OrganizationLogType.TYPE_SCHEDULE_EXPORT_TOKEN_CREATED, description)
+ return instance, token_string
diff --git a/engine/apps/auth_token/models/slack_auth_token.py b/engine/apps/auth_token/models/slack_auth_token.py
new file mode 100644
index 0000000000..6e144230f7
--- /dev/null
+++ b/engine/apps/auth_token/models/slack_auth_token.py
@@ -0,0 +1,48 @@
+from typing import Tuple
+
+from django.db import models
+from django.utils import timezone
+
+from apps.auth_token import constants, crypto
+from apps.auth_token.models import BaseAuthToken
+from apps.user_management.models import Organization, User
+from settings.base import SLACK_AUTH_TOKEN_TIMEOUT_SECONDS
+
+
+def get_expire_date():
+ return timezone.now() + timezone.timedelta(seconds=SLACK_AUTH_TOKEN_TIMEOUT_SECONDS)
+
+
+class SlackAuthTokenQueryset(models.QuerySet):
+ def filter(self, *args, **kwargs):
+ now = timezone.now()
+ return super().filter(*args, **kwargs, revoked_at=None, expire_date__gte=now)
+
+ def delete(self):
+ self.update(revoked_at=timezone.now())
+
+
+class SlackAuthToken(BaseAuthToken):
+ objects = SlackAuthTokenQueryset.as_manager()
+ user = models.ForeignKey(
+ "user_management.User",
+ related_name="slack_auth_token_set",
+ on_delete=models.CASCADE,
+ )
+ organization = models.ForeignKey(
+ "user_management.Organization", related_name="slack_auth_token_set", on_delete=models.CASCADE
+ )
+ expire_date = models.DateTimeField(default=get_expire_date)
+
+ @classmethod
+ def create_auth_token(cls, user: User, organization: Organization) -> Tuple["SlackAuthToken", str]:
+ token_string = crypto.generate_token_string()
+ digest = crypto.hash_token_string(token_string)
+
+ instance = cls.objects.create(
+ token_key=token_string[: constants.TOKEN_KEY_LENGTH],
+ digest=digest,
+ user=user,
+ organization=organization,
+ )
+ return instance, token_string
diff --git a/engine/apps/auth_token/models/user_schedule_export_auth_token.py b/engine/apps/auth_token/models/user_schedule_export_auth_token.py
new file mode 100644
index 0000000000..34242dabbf
--- /dev/null
+++ b/engine/apps/auth_token/models/user_schedule_export_auth_token.py
@@ -0,0 +1,36 @@
+from typing import Tuple
+
+from django.db import models
+
+from apps.auth_token import constants, crypto
+from apps.auth_token.models.base_auth_token import BaseAuthToken
+from apps.user_management.models import Organization, User
+from apps.user_management.organization_log_creator import OrganizationLogType, create_organization_log
+
+
+class UserScheduleExportAuthToken(BaseAuthToken):
+ class Meta:
+ unique_together = ("user", "organization")
+
+ user = models.ForeignKey(
+ to=User, null=False, blank=False, related_name="user_schedule_export_token", on_delete=models.CASCADE
+ )
+ organization = models.ForeignKey(
+ to=Organization, null=False, blank=False, related_name="user_schedule_export_token", on_delete=models.CASCADE
+ )
+ active = models.BooleanField(default=True)
+
+ @classmethod
+ def create_auth_token(cls, user: User, organization: Organization) -> Tuple["UserScheduleExportAuthToken", str]:
+ token_string = crypto.generate_schedule_token_string()
+ digest = crypto.hash_token_string(token_string)
+
+ instance = cls.objects.create(
+ token_key=token_string[: constants.TOKEN_KEY_LENGTH],
+ digest=digest,
+ user=user,
+ organization=organization,
+ )
+ description = "User schedule export token was created by user {0}".format(user.username)
+ create_organization_log(organization, user, OrganizationLogType.TYPE_SCHEDULE_EXPORT_TOKEN_CREATED, description)
+ return instance, token_string
diff --git a/engine/apps/auth_token/tests/__init__.py b/engine/apps/auth_token/tests/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/engine/apps/auth_token/tests/test_crypto.py b/engine/apps/auth_token/tests/test_crypto.py
new file mode 100644
index 0000000000..7805cc9d8d
--- /dev/null
+++ b/engine/apps/auth_token/tests/test_crypto.py
@@ -0,0 +1,23 @@
+import binascii
+from hmac import compare_digest
+
+from apps.auth_token.crypto import (
+ generate_plugin_token_string,
+ generate_plugin_token_string_and_salt,
+ hash_token_string,
+)
+
+
+def test_plugin_token_round_trip():
+ stack_id = 100
+ org_id = 100
+
+ hex_token, hex_salt = generate_plugin_token_string_and_salt(stack_id, org_id)
+ hex_signature = hash_token_string(hex_token)
+
+ raw_salt = binascii.unhexlify(hex_salt)
+ hex_recreated_token = generate_plugin_token_string(raw_salt, stack_id, org_id)
+ hex_recreated_signature = hash_token_string(hex_recreated_token)
+
+ assert hex_token == hex_recreated_token
+ assert compare_digest(hex_signature, hex_recreated_signature)
diff --git a/engine/apps/base/__init__.py b/engine/apps/base/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/engine/apps/base/admin.py b/engine/apps/base/admin.py
new file mode 100644
index 0000000000..7224190bc4
--- /dev/null
+++ b/engine/apps/base/admin.py
@@ -0,0 +1,24 @@
+from django.contrib import admin
+
+from common.admin import CustomModelAdmin
+
+from .models import DynamicSetting, FailedToInvokeCeleryTask, UserNotificationPolicy, UserNotificationPolicyLogRecord
+
+admin.site.register(DynamicSetting)
+
+
+@admin.register(UserNotificationPolicy)
+class UserNotificationPolicyAdmin(CustomModelAdmin):
+ list_display = ("id", "public_primary_key", "user", "important", "short_verbal")
+
+
+@admin.register(UserNotificationPolicyLogRecord)
+class UserNotificationPolicyLogRecordAdmin(CustomModelAdmin):
+ list_display = ("id", "alert_group", "notification_policy", "author", "type", "created_at")
+ list_filter = ("type", "created_at")
+
+
+@admin.register(FailedToInvokeCeleryTask)
+class FailedToInvokeCeleryTaskAdmin(CustomModelAdmin):
+ list_display = ("id", "name", "is_sent")
+ list_filter = ("is_sent",)
diff --git a/engine/apps/base/constants.py b/engine/apps/base/constants.py
new file mode 100644
index 0000000000..3e719f8ce1
--- /dev/null
+++ b/engine/apps/base/constants.py
@@ -0,0 +1,23 @@
+# This is a temporary solution so permissions are not hardcoded on the frontend.
+# It should be replaced with one that collects permissions from the views' action_permission attribute.
+ALL_PERMISSIONS = [
+ "update_incidents",
+ "update_alert_receive_channels",
+ "update_escalation_policies",
+ "update_notification_policies",
+ "update_general_log_channel_id",
+ "update_own_settings",
+ "update_other_users_settings",
+ "update_integrations",
+ "update_schedules",
+ "update_custom_actions",
+ "update_api_tokens",
+ "update_teams",
+ "update_maintenances",
+ "update_global_settings",
+ "send_demo_alert",
+ "view_other_users",
+]
+ADMIN_PERMISSIONS = ALL_PERMISSIONS
+EDITOR_PERMISSIONS = ["update_incidents", "update_own_settings", "view_other_users"]
+ALL_ROLES_PERMISSIONS = []
diff --git a/engine/apps/base/messaging.py b/engine/apps/base/messaging.py
new file mode 100644
index 0000000000..694bb22182
--- /dev/null
+++ b/engine/apps/base/messaging.py
@@ -0,0 +1,73 @@
+from django.conf import settings
+from django.utils.module_loading import import_string
+
+
+class BaseMessagingBackend:
+ backend_id = "SOMEID"
+ label = "The Backend"
+ short_label = "Backend"
+ available_for_use = False
+ templater = None
+
+ def get_templater_class(self):
+ if self.templater:
+ return import_string(self.templater)
+
+ def validate_channel_filter_data(self, channel_filter, data):
+ """Validate JSON channel data for a channel filter update.
+
+ Ensure the required/expected data is provided as needed by the backend.
+
+ """
+ return data
+
+ def generate_channel_verification_code(self, organization):
+ """Return a verification code for a channel registration."""
+ raise NotImplementedError("generate_channel_verification_code method missing implementation")
+
+ def generate_user_verification_code(self, user):
+ """Return a verification code to link a user with an account."""
+ raise NotImplementedError("generate_user_verification_code method missing implementation")
+
+ def unlink_user(self, user):
+ """Remove backend link to user account."""
+ return
+
+ def serialize_user(self, user):
+ """Return a serialized backend user representation."""
+ raise NotImplementedError("serialize_user method missing implementation")
+
+ def notify_user(self, user, alert_group, notification_policy):
+ """Send user a notification for the given alert group.
+
+ The notification policy links to the backend as the notification channel.
+
+ """
+ raise NotImplementedError("notify_user method missing implementation")
+
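+# Minimal sketch of a custom backend (hypothetical names throughout; the
+# dotted path would be listed in settings.EXTRA_MESSAGING_BACKENDS with
+# FEATURE_EXTRA_MESSAGING_BACKENDS_ENABLED turned on):
+#
+#     class ExampleSMSBackend(BaseMessagingBackend):
+#         backend_id = "EXAMPLE_SMS"
+#         label = "Example SMS"
+#         short_label = "SMS"
+#         available_for_use = True
+#
+#         def serialize_user(self, user):
+#             return {"user_id": user.public_primary_key}
+#
+#         def notify_user(self, user, alert_group, notification_policy):
+#             deliver_sms(user, f"Alert group {alert_group.public_primary_key}")  # deliver_sms is hypothetical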
+
+def load_backend(path):
+ return import_string(path)()
+
+
+def get_messaging_backends():
+ global _messaging_backends
+ if not settings.FEATURE_EXTRA_MESSAGING_BACKENDS_ENABLED:
+ return {}
+
+ if _messaging_backends is None:
+ _messaging_backends = {}
+ for backend_path in settings.EXTRA_MESSAGING_BACKENDS:
+ backend = load_backend(backend_path)
+ _messaging_backends[backend.backend_id] = backend
+ return _messaging_backends.items()
+
+
+def get_messaging_backend_from_id(backend_id):
+    backend = None
+    if settings.FEATURE_EXTRA_MESSAGING_BACKENDS_ENABLED:
+        get_messaging_backends()  # ensure the lazy registry is initialized
+        backend = _messaging_backends.get(backend_id)
+    return backend
+
+
+_messaging_backends = None
diff --git a/engine/apps/base/migrations/0001_squashed_initial.py b/engine/apps/base/migrations/0001_squashed_initial.py
new file mode 100644
index 0000000000..dfdf66d3cf
--- /dev/null
+++ b/engine/apps/base/migrations/0001_squashed_initial.py
@@ -0,0 +1,89 @@
+# Generated by Django 3.2.5 on 2022-05-31 14:46
+
+import apps.base.models.live_setting
+import apps.base.models.organization_log_record
+import apps.base.models.user_notification_policy
+import datetime
+import django.core.validators
+from django.db import migrations, models
+import django.db.models.deletion
+
+
+class Migration(migrations.Migration):
+
+ initial = True
+
+ dependencies = [
+ ('alerts', '0001_squashed_initial'),
+ ]
+
+ operations = [
+ migrations.CreateModel(
+ name='DynamicSetting',
+ fields=[
+ ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
+ ('name', models.CharField(max_length=100)),
+ ('boolean_value', models.BooleanField(default=None, null=True)),
+ ('numeric_value', models.IntegerField(default=None, null=True)),
+ ('json_value', models.JSONField(blank=True, default=None, null=True)),
+ ],
+ ),
+ migrations.CreateModel(
+ name='FailedToInvokeCeleryTask',
+ fields=[
+ ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
+ ('name', models.CharField(max_length=500)),
+ ('parameters', models.JSONField()),
+ ('is_sent', models.BooleanField(default=False)),
+ ],
+ ),
+ migrations.CreateModel(
+ name='LiveSetting',
+ fields=[
+ ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
+ ('public_primary_key', models.CharField(default=apps.base.models.live_setting.generate_public_primary_key_for_live_setting, max_length=20, unique=True, validators=[django.core.validators.MinLengthValidator(13)])),
+ ('name', models.CharField(max_length=50, unique=True)),
+ ('value', models.JSONField(default=None, null=True)),
+ ('error', models.TextField(default=None, null=True)),
+ ],
+ ),
+ migrations.CreateModel(
+ name='OrganizationLogRecord',
+ fields=[
+ ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
+ ('public_primary_key', models.CharField(default=apps.base.models.organization_log_record.generate_public_primary_key_for_organization_log, max_length=20, unique=True, validators=[django.core.validators.MinLengthValidator(13)])),
+ ('created_at', models.DateTimeField(auto_now_add=True)),
+ ('description', models.TextField(default=None, null=True)),
+ ('_labels', models.JSONField(default=list)),
+ ],
+ ),
+ migrations.CreateModel(
+ name='UserNotificationPolicy',
+ fields=[
+ ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
+ ('order', models.PositiveIntegerField(db_index=True, editable=False, verbose_name='order')),
+ ('public_primary_key', models.CharField(default=apps.base.models.user_notification_policy.generate_public_primary_key_for_notification_policy, max_length=20, unique=True, validators=[django.core.validators.MinLengthValidator(13)])),
+ ('step', models.PositiveSmallIntegerField(choices=[(0, 'Wait'), (1, 'Notify by')], default=None, null=True)),
+ ('notify_by', models.PositiveSmallIntegerField(default=0, validators=[apps.base.models.user_notification_policy.validate_channel_choice])),
+ ('wait_delay', models.DurationField(choices=[(datetime.timedelta(seconds=60), '1 min'), (datetime.timedelta(seconds=300), '5 min'), (datetime.timedelta(seconds=900), '15 min'), (datetime.timedelta(seconds=1800), '30 min'), (datetime.timedelta(seconds=3600), '60 min')], default=None, null=True)),
+ ('important', models.BooleanField(default=False)),
+ ],
+ options={
+ 'ordering': ('order',),
+ },
+ ),
+ migrations.CreateModel(
+ name='UserNotificationPolicyLogRecord',
+ fields=[
+ ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
+ ('type', models.IntegerField(choices=[(0, 'Personal notification triggered'), (1, 'Personal notification finished'), (2, 'Personal notification success'), (3, 'Personal notification failed')])),
+ ('slack_prevent_posting', models.BooleanField(default=False)),
+ ('created_at', models.DateTimeField(auto_now_add=True)),
+ ('reason', models.TextField(default=None, null=True)),
+ ('notification_error_code', models.PositiveIntegerField(default=None, null=True)),
+ ('notification_step', models.IntegerField(choices=[(0, 'Wait'), (1, 'Notify by')], default=None, null=True)),
+ ('notification_channel', models.IntegerField(default=None, null=True, validators=[apps.base.models.user_notification_policy.validate_channel_choice])),
+ ('alert_group', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='personal_log_records', to='alerts.alertgroup')),
+ ],
+ ),
+ ]
diff --git a/engine/apps/base/migrations/0002_squashed_initial.py b/engine/apps/base/migrations/0002_squashed_initial.py
new file mode 100644
index 0000000000..9bf98adbb7
--- /dev/null
+++ b/engine/apps/base/migrations/0002_squashed_initial.py
@@ -0,0 +1,46 @@
+# Generated by Django 3.2.5 on 2022-05-31 14:46
+
+from django.db import migrations, models
+import django.db.models.deletion
+
+
+class Migration(migrations.Migration):
+
+ initial = True
+
+ dependencies = [
+ ('user_management', '0001_squashed_initial'),
+ ('base', '0001_squashed_initial'),
+ ]
+
+ operations = [
+ migrations.AddField(
+ model_name='usernotificationpolicylogrecord',
+ name='author',
+ field=models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='personal_log_records', to='user_management.user'),
+ ),
+ migrations.AddField(
+ model_name='usernotificationpolicylogrecord',
+ name='notification_policy',
+ field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='personal_log_records', to='base.usernotificationpolicy'),
+ ),
+ migrations.AddField(
+ model_name='usernotificationpolicy',
+ name='user',
+ field=models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='notification_policies', to='user_management.user'),
+ ),
+ migrations.AddField(
+ model_name='organizationlogrecord',
+ name='author',
+ field=models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='team_log_records', to='user_management.user'),
+ ),
+ migrations.AddField(
+ model_name='organizationlogrecord',
+ name='organization',
+ field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='log_records', to='user_management.organization'),
+ ),
+ migrations.AddConstraint(
+ model_name='dynamicsetting',
+ constraint=models.UniqueConstraint(fields=('name',), name='unique_dynamic_setting_name'),
+ ),
+ ]
diff --git a/engine/apps/base/migrations/0003_squashed_create_demo_token_instances.py b/engine/apps/base/migrations/0003_squashed_create_demo_token_instances.py
new file mode 100644
index 0000000000..a590210ade
--- /dev/null
+++ b/engine/apps/base/migrations/0003_squashed_create_demo_token_instances.py
@@ -0,0 +1,74 @@
+# Generated by Django 3.2.5 on 2021-08-04 10:45
+
+import sys
+from django.db import migrations
+from django.utils import timezone
+from apps.public_api import constants as public_api_constants
+
+
+STEP_WAIT = 0
+STEP_NOTIFY = 1
+NOTIFY_BY_SMS = 1
+NOTIFY_BY_PHONE = 2
+FIVE_MINUTES = timezone.timedelta(minutes=5)
+
+
+def create_demo_token_instances(apps, schema_editor):
+ if not (len(sys.argv) > 1 and sys.argv[1] == 'test'):
+ User = apps.get_model('user_management', 'User')
+ UserNotificationPolicy = apps.get_model("base", "UserNotificationPolicy")
+
+ user = User.objects.get(public_primary_key=public_api_constants.DEMO_USER_ID)
+
+ UserNotificationPolicy.objects.get_or_create(
+ public_primary_key=public_api_constants.DEMO_PERSONAL_NOTIFICATION_ID_1,
+ defaults=dict(
+ important=False,
+ user=user,
+ notify_by=NOTIFY_BY_SMS,
+ step=STEP_NOTIFY,
+ order=0,
+ )
+ )
+ UserNotificationPolicy.objects.get_or_create(
+ public_primary_key=public_api_constants.DEMO_PERSONAL_NOTIFICATION_ID_2,
+ defaults=dict(
+ important=False,
+ user=user,
+ step=STEP_WAIT,
+ wait_delay=FIVE_MINUTES,
+ order=1,
+ )
+ )
+ UserNotificationPolicy.objects.get_or_create(
+ public_primary_key=public_api_constants.DEMO_PERSONAL_NOTIFICATION_ID_3,
+ defaults=dict(
+ important=False,
+ user=user,
+ step=STEP_NOTIFY,
+ notify_by=NOTIFY_BY_PHONE,
+ order=2,
+ )
+ )
+
+ UserNotificationPolicy.objects.get_or_create(
+ public_primary_key=public_api_constants.DEMO_PERSONAL_NOTIFICATION_ID_4,
+ defaults=dict(
+ important=True,
+ user=user,
+ notify_by=NOTIFY_BY_PHONE,
+ order=0,
+ )
+ )
+
+
+class Migration(migrations.Migration):
+
+ dependencies = [
+ ('base', '0002_squashed_initial'),
+ ('user_management', '0002_squashed_create_demo_token_instances')
+ ]
+
+ operations = [
+ migrations.RunPython(create_demo_token_instances, migrations.RunPython.noop)
+ ]
diff --git a/engine/apps/base/migrations/__init__.py b/engine/apps/base/migrations/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/engine/apps/base/models/__init__.py b/engine/apps/base/models/__init__.py
new file mode 100644
index 0000000000..7dc0d486a1
--- /dev/null
+++ b/engine/apps/base/models/__init__.py
@@ -0,0 +1,6 @@
+from .dynamic_setting import DynamicSetting # noqa: F401
+from .failed_to_invoke_celery_task import FailedToInvokeCeleryTask # noqa: F401
+from .live_setting import LiveSetting # noqa: F401
+from .organization_log_record import OrganizationLogRecord # noqa: F401
+from .user_notification_policy import UserNotificationPolicy # noqa: F401
+from .user_notification_policy_log_record import UserNotificationPolicyLogRecord # noqa: F401
diff --git a/engine/apps/base/models/dynamic_setting.py b/engine/apps/base/models/dynamic_setting.py
new file mode 100644
index 0000000000..de6ce12999
--- /dev/null
+++ b/engine/apps/base/models/dynamic_setting.py
@@ -0,0 +1,39 @@
+from django.db import IntegrityError, models
+from django.db.models import JSONField
+
+
+class DynamicSettingsManager(models.Manager):
+ def get_or_create(self, defaults=None, **kwargs):
+ """
+        Using get_or_create inside a celery task sometimes creates two identical DynamicSettings.
+        E.g. https://gitlab.amixr.io/amixr/amixr/issues/843
+        More info: https://stackoverflow.com/questions/17960593/multipleobjectsreturned-with-get-or-create
+        The solution is to create a UniqueConstraint on DynamicSetting.name and catch the IntegrityError.
+        Django 3 has a built-in check https://github.com/django/django/blob/master/django/db/models/query.py#L571
+        but for now we are on Django 2.2, which does not.
+        # TODO: remove this method when we move to Django 3
+        So get_or_create is overridden here to catch the IntegrityError and just return the object in that case.
+ """
+ try:
+ return super(DynamicSettingsManager, self).get_or_create(defaults=defaults, **kwargs)
+ except IntegrityError:
+ try:
+ return self.get(**kwargs), False
+ except self.model.DoesNotExist:
+ pass
+ raise
+
+
+class DynamicSetting(models.Model):
+ objects = DynamicSettingsManager()
+
+ name = models.CharField(max_length=100)
+ boolean_value = models.BooleanField(null=True, default=None)
+ numeric_value = models.IntegerField(null=True, default=None)
+ json_value = JSONField(default=None, null=True, blank=True)
+
+ class Meta:
+ constraints = [models.UniqueConstraint(fields=["name"], name="unique_dynamic_setting_name")]
+
+ def __str__(self):
+ return self.name
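+
+# Usage sketch for the race-safe manager above: two workers may both miss the initial
+# get() and race on create(); the UniqueConstraint on "name" makes the loser raise
+# IntegrityError, which the manager turns back into a plain get().
+#
+#   obj, created = DynamicSetting.objects.get_or_create(
+#       name="some_flag",  # hypothetical setting name
+#       defaults={"boolean_value": True},
+#   )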
diff --git a/engine/apps/base/models/failed_to_invoke_celery_task.py b/engine/apps/base/models/failed_to_invoke_celery_task.py
new file mode 100644
index 0000000000..ace8bd1ce9
--- /dev/null
+++ b/engine/apps/base/models/failed_to_invoke_celery_task.py
@@ -0,0 +1,18 @@
+from django.db import models
+
+from engine.celery import app
+
+
+class FailedToInvokeCeleryTask(models.Model):
+ name = models.CharField(max_length=500)
+ parameters = models.JSONField()
+
+ is_sent = models.BooleanField(default=False)
+
+ def send(self):
+ app.send_task(
+ name=self.name,
+ args=self.parameters.get("args", []),
+ kwargs=self.parameters.get("kwargs", {}),
+ **self.parameters.get("options", {}),
+ )
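+
+# Illustrative shape of the `parameters` JSON consumed by send(); the task name and
+# payload below are hypothetical:
+#
+#   FailedToInvokeCeleryTask.objects.create(
+#       name="apps.some_app.tasks.some_task",
+#       parameters={"args": [1], "kwargs": {"key": "value"}, "options": {"countdown": 60}},
+#   )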
diff --git a/engine/apps/base/models/live_setting.py b/engine/apps/base/models/live_setting.py
new file mode 100644
index 0000000000..c08ab11f09
--- /dev/null
+++ b/engine/apps/base/models/live_setting.py
@@ -0,0 +1,174 @@
+from django.conf import settings
+from django.core.validators import MinLengthValidator
+from django.db import models
+from django.db.models import JSONField
+
+from apps.base.utils import LiveSettingValidator
+from common.public_primary_keys import generate_public_primary_key, increase_public_primary_key_length
+
+
+def generate_public_primary_key_for_live_setting():
+ prefix = "L"
+ new_public_primary_key = generate_public_primary_key(prefix)
+
+ failure_counter = 0
+ while LiveSetting.objects.filter(public_primary_key=new_public_primary_key).exists():
+ new_public_primary_key = increase_public_primary_key_length(
+ failure_counter=failure_counter, prefix=prefix, model_name="LiveSetting"
+ )
+ failure_counter += 1
+
+ return new_public_primary_key
+
+
+class LiveSetting(models.Model):
+ public_primary_key = models.CharField(
+ max_length=20,
+ validators=[MinLengthValidator(settings.PUBLIC_PRIMARY_KEY_MIN_LENGTH + 1)],
+ unique=True,
+ default=generate_public_primary_key_for_live_setting,
+ )
+ name = models.CharField(max_length=50, unique=True)
+ value = JSONField(null=True, default=None)
+ error = models.TextField(null=True, default=None)
+
+ AVAILABLE_NAMES = (
+ "TWILIO_ACCOUNT_SID",
+ "TWILIO_AUTH_TOKEN",
+ "TWILIO_NUMBER",
+ "TWILIO_VERIFY_SERVICE_SID",
+ "TELEGRAM_TOKEN",
+ "SLACK_CLIENT_OAUTH_ID",
+ "SLACK_CLIENT_OAUTH_SECRET",
+ "SLACK_SIGNING_SECRET",
+ "SEND_ANONYMOUS_USAGE_STATS",
+ "GRAFANA_CLOUD_ONCALL_TOKEN",
+ "GRAFANA_CLOUD_ONCALL_HEARTBEAT_ENABLED",
+ )
+
+ DESCRIPTIONS = {
+ "SLACK_SIGNING_SECRET": (
+            "Check this instruction for details on how to set up Slack. "
+            "Slack secrets can't be verified on the backend; please try installing the Slack Bot "
+            "after you update the Slack credentials."
+ ),
+ "SLACK_CLIENT_OAUTH_SECRET": (
+            "Check this instruction for details on how to set up Slack. "
+            "Slack secrets can't be verified on the backend; please try installing the Slack Bot "
+            "after you update the Slack credentials."
+ ),
+ "SLACK_CLIENT_OAUTH_ID": (
+            "Check this instruction for details on how to set up Slack. "
+            "Slack secrets can't be verified on the backend; please try installing the Slack Bot "
+            "after you update the Slack credentials."
+ ),
+ "TWILIO_ACCOUNT_SID": (
+            "Twilio username to allow amixr to send SMS and make phone calls, "
+            "more info."
+ ),
+ "TWILIO_AUTH_TOKEN": (
+            "Twilio password to allow amixr to send SMS and make calls, "
+            "more info."
+ ),
+ "TWILIO_NUMBER": (
+ "Number from which you will receive calls and SMS, "
+            "more info."
+ ),
+ "TWILIO_VERIFY_SERVICE_SID": (
+ "SID of Twilio service for number verification. "
+ "You can create a service in Twilio web interface. "
+ "twilio.com -> verify -> create new service."
+ ),
+ "SENDGRID_API_KEY": (
+            "SendGrid API key to send emails, "
+            "more info."
+ ),
+ "SENDGRID_FROM_EMAIL": (
+            "Address to send emails from, " "more info."
+ ),
+        "SENDGRID_SECRET_KEY": "Secret key used to secure receiving of inbound emails.",
+ "SENDGRID_INBOUND_EMAIL_DOMAIN": "Domain to receive emails for inbound emails integration.",
+ "TELEGRAM_TOKEN": (
+            "Secret token for the Telegram bot; you can get one via " "BotFather."
+ ),
+ "SEND_ANONYMOUS_USAGE_STATS": (
+ "Grafana OnCall will send anonymous, but uniquely-identifiable usage analytics to Grafana Labs."
+ " These statistics are sent to https://stats.grafana.org/. For more information on what's sent, look at"
+            " https://github.com/..."  # TODO: add url to usage stats code
+ ),
+ "GRAFANA_CLOUD_ONCALL_TOKEN": "Secret token for Grafana Cloud OnCall instance.",
+        "GRAFANA_CLOUD_ONCALL_HEARTBEAT_ENABLED": "Enable heartbeat integration with Grafana Cloud OnCall.",
+ }
+
+ SECRET_SETTING_NAMES = (
+ "TWILIO_ACCOUNT_SID",
+ "TWILIO_AUTH_TOKEN",
+ "TWILIO_VERIFY_SERVICE_SID",
+ "SENDGRID_API_KEY",
+ "SENDGRID_SECRET_KEY",
+ "SLACK_CLIENT_OAUTH_ID",
+ "SLACK_CLIENT_OAUTH_SECRET",
+ "SLACK_SIGNING_SECRET",
+ "TELEGRAM_TOKEN",
+ "GRAFANA_CLOUD_ONCALL_TOKEN",
+ )
+
+ def __str__(self):
+ return self.name
+
+ @property
+ def description(self):
+ return self.DESCRIPTIONS.get(self.name)
+
+ @property
+ def default_value(self):
+ return self._get_setting_from_setting_file(self.name)
+
+ @property
+ def is_secret(self):
+ return self.name in self.SECRET_SETTING_NAMES
+
+ @classmethod
+ def get_setting(cls, setting_name):
+ if not settings.FEATURE_LIVE_SETTINGS_ENABLED:
+ return cls._get_setting_from_setting_file(setting_name)
+
+ if setting_name not in cls.AVAILABLE_NAMES:
+ raise ValueError(
+ f"Setting with name '{setting_name}' is not in list of available names {cls.AVAILABLE_NAMES}"
+ )
+
+ live_setting = cls.objects.filter(name=setting_name).first()
+ if live_setting is not None:
+ return live_setting.value
+ else:
+ return cls._get_setting_from_setting_file(setting_name)
+
+ @classmethod
+ def populate_settings_if_needed(cls):
+ settings_in_db = cls.objects.filter(name__in=cls.AVAILABLE_NAMES).values_list("name", flat=True)
+ setting_names_to_populate = set(cls.AVAILABLE_NAMES) - set(settings_in_db)
+
+ for setting_name in setting_names_to_populate:
+ cls.objects.create(name=setting_name, value=cls._get_setting_from_setting_file(setting_name))
+
+ @staticmethod
+ def _get_setting_from_setting_file(setting_name):
+ return getattr(settings, setting_name)
+
+ def save(self, *args, **kwargs):
+ if self.name not in self.AVAILABLE_NAMES:
+ raise ValueError(
+ f"Setting with name '{self.name}' is not in list of available names {self.AVAILABLE_NAMES}"
+ )
+
+ self.error = LiveSettingValidator(live_setting=self).get_error()
+ super().save(*args, **kwargs)
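+
+# Resolution order sketch for get_setting(): with FEATURE_LIVE_SETTINGS_ENABLED set,
+# a LiveSetting row (if present) wins; otherwise the value falls back to the Django
+# settings module. The token value below is a made-up example.
+#
+#   LiveSetting.objects.create(name="TELEGRAM_TOKEN", value="123:abc")
+#   LiveSetting.get_setting("TELEGRAM_TOKEN")  # -> "123:abc", regardless of settings.py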
diff --git a/engine/apps/base/models/organization_log_record.py b/engine/apps/base/models/organization_log_record.py
new file mode 100644
index 0000000000..9f4e06b36e
--- /dev/null
+++ b/engine/apps/base/models/organization_log_record.py
@@ -0,0 +1,317 @@
+from django.apps import apps
+from django.conf import settings
+from django.core.validators import MinLengthValidator
+from django.db import models
+from django.db.models import JSONField
+from emoji import emojize
+
+from apps.alerts.models.maintainable_object import MaintainableObject
+from apps.user_management.organization_log_creator import OrganizationLogType
+from common.public_primary_keys import generate_public_primary_key, increase_public_primary_key_length
+
+
+def generate_public_primary_key_for_organization_log():
+ prefix = "V"
+ new_public_primary_key = generate_public_primary_key(prefix)
+
+ failure_counter = 0
+ while OrganizationLogRecord.objects.filter(public_primary_key=new_public_primary_key).exists():
+ new_public_primary_key = increase_public_primary_key_length(
+ failure_counter=failure_counter, prefix=prefix, model_name="OrganizationLogRecord"
+ )
+ failure_counter += 1
+
+ return new_public_primary_key
+
+
+class OrganizationLogRecordManager(models.Manager):
+ def create(self, organization, author, type, description):
+ # set labels
+ labels = OrganizationLogRecord.LABELS_FOR_TYPE[type]
+ return super().create(
+ organization=organization,
+ author=author,
+ description=description,
+ _labels=labels,
+ )
+
+
+class OrganizationLogRecord(models.Model):
+
+ objects = OrganizationLogRecordManager()
+
+ LABEL_ORGANIZATION = "organization"
+ LABEL_SLACK = "slack"
+ LABEL_TELEGRAM = "telegram"
+ LABEL_DEFAULT_CHANNEL = "default channel"
+ LABEL_SLACK_WORKSPACE_CONNECTED = "slack workspace connected"
+ LABEL_SLACK_WORKSPACE_DISCONNECTED = "slack workspace disconnected"
+ LABEL_TELEGRAM_CHANNEL_CONNECTED = "telegram channel connected"
+ LABEL_TELEGRAM_CHANNEL_DISCONNECTED = "telegram channel disconnected"
+ LABEL_INTEGRATION = "integration"
+ LABEL_INTEGRATION_CREATED = "integration created"
+ LABEL_INTEGRATION_DELETED = "integration deleted"
+ LABEL_INTEGRATION_CHANGED = "integration changed"
+ LABEL_INTEGRATION_HEARTBEAT = "integration heartbeat"
+ LABEL_INTEGRATION_HEARTBEAT_CREATED = "integration heartbeat created"
+ LABEL_INTEGRATION_HEARTBEAT_CHANGED = "integration heartbeat changed"
+ LABEL_MAINTENANCE = "maintenance"
+ LABEL_MAINTENANCE_STARTED = "maintenance started"
+ LABEL_MAINTENANCE_STOPPED = "maintenance stopped"
+ LABEL_DEBUG = "debug"
+ LABEL_DEBUG_STARTED = "debug started"
+ LABEL_DEBUG_STOPPED = "debug stopped"
+ LABEL_CHANNEL_FILTER = "route"
+ LABEL_CHANNEL_FILTER_CREATED = "route created"
+ LABEL_CHANNEL_FILTER_CHANGED = "route changed"
+ LABEL_CHANNEL_FILTER_DELETED = "route deleted"
+ LABEL_ESCALATION_CHAIN = "escalation chain"
+ LABEL_ESCALATION_CHAIN_CREATED = "escalation chain created"
+ LABEL_ESCALATION_CHAIN_DELETED = "escalation chain deleted"
+ LABEL_ESCALATION_CHAIN_CHANGED = "escalation chain changed"
+ LABEL_ESCALATION_POLICY = "escalation policy"
+ LABEL_ESCALATION_POLICY_CREATED = "escalation policy created"
+ LABEL_ESCALATION_POLICY_DELETED = "escalation policy deleted"
+ LABEL_ESCALATION_POLICY_CHANGED = "escalation policy changed"
+ LABEL_CUSTOM_ACTION = "custom action"
+ LABEL_CUSTOM_ACTION_CREATED = "custom action created"
+ LABEL_CUSTOM_ACTION_DELETED = "custom action deleted"
+ LABEL_CUSTOM_ACTION_CHANGED = "custom action changed"
+ LABEL_SCHEDULE = "schedule"
+ LABEL_SCHEDULE_CREATED = "schedule created"
+ LABEL_SCHEDULE_DELETED = "schedule deleted"
+ LABEL_SCHEDULE_CHANGED = "schedule changed"
+ LABEL_ON_CALL_SHIFT = "on-call shift"
+ LABEL_ON_CALL_SHIFT_CREATED = "on-call shift created"
+ LABEL_ON_CALL_SHIFT_DELETED = "on-call shift deleted"
+ LABEL_ON_CALL_SHIFT_CHANGED = "on-call shift changed"
+ LABEL_USER = "user"
+ LABEL_USER_CREATED = "user created"
+ LABEL_USER_SETTINGS_CHANGED = "user changed"
+ LABEL_ORGANIZATION_SETTINGS_CHANGED = "organization settings changed"
+ LABEL_TELEGRAM_TO_USER_CONNECTED = "telegram to user connected"
+ LABEL_TELEGRAM_FROM_USER_DISCONNECTED = "telegram from user disconnected"
+ LABEL_API_TOKEN = "api token"
+ LABEL_API_TOKEN_CREATED = "api token created"
+ LABEL_API_TOKEN_REVOKED = "api token revoked"
+ LABEL_ESCALATION_CHAIN_COPIED = "escalation chain copied"
+ LABEL_SCHEDULE_EXPORT_TOKEN = "schedule export token"
+ LABEL_SCHEDULE_EXPORT_TOKEN_CREATED = "schedule export token created"
+ LABEL_MESSAGING_BACKEND_CHANNEL_CHANGED = "messaging backend channel changed"
+ LABEL_MESSAGING_BACKEND_CHANNEL_DELETED = "messaging backend channel deleted"
+ LABEL_MESSAGING_BACKEND_USER_DISCONNECTED = "messaging backend user disconnected"
+
+ LABELS = [
+ LABEL_ORGANIZATION,
+ LABEL_SLACK,
+ LABEL_TELEGRAM,
+ LABEL_DEFAULT_CHANNEL,
+ LABEL_SLACK_WORKSPACE_CONNECTED,
+ LABEL_SLACK_WORKSPACE_DISCONNECTED,
+ LABEL_TELEGRAM_CHANNEL_CONNECTED,
+ LABEL_TELEGRAM_CHANNEL_DISCONNECTED,
+ LABEL_INTEGRATION,
+ LABEL_INTEGRATION_CREATED,
+ LABEL_INTEGRATION_DELETED,
+ LABEL_INTEGRATION_CHANGED,
+ LABEL_INTEGRATION_HEARTBEAT,
+ LABEL_INTEGRATION_HEARTBEAT_CREATED,
+ LABEL_INTEGRATION_HEARTBEAT_CHANGED,
+ LABEL_MAINTENANCE,
+ LABEL_MAINTENANCE_STARTED,
+ LABEL_MAINTENANCE_STOPPED,
+ LABEL_DEBUG,
+ LABEL_DEBUG_STARTED,
+ LABEL_DEBUG_STOPPED,
+ LABEL_CHANNEL_FILTER,
+ LABEL_CHANNEL_FILTER_CREATED,
+ LABEL_CHANNEL_FILTER_CHANGED,
+ LABEL_CHANNEL_FILTER_DELETED,
+ LABEL_ESCALATION_CHAIN,
+ LABEL_ESCALATION_CHAIN_CREATED,
+ LABEL_ESCALATION_CHAIN_DELETED,
+ LABEL_ESCALATION_CHAIN_CHANGED,
+ LABEL_ESCALATION_POLICY,
+ LABEL_ESCALATION_POLICY_CREATED,
+ LABEL_ESCALATION_POLICY_DELETED,
+ LABEL_ESCALATION_POLICY_CHANGED,
+ LABEL_CUSTOM_ACTION,
+ LABEL_CUSTOM_ACTION_CREATED,
+ LABEL_CUSTOM_ACTION_DELETED,
+ LABEL_CUSTOM_ACTION_CHANGED,
+ LABEL_SCHEDULE,
+ LABEL_SCHEDULE_CREATED,
+ LABEL_SCHEDULE_DELETED,
+ LABEL_SCHEDULE_CHANGED,
+ LABEL_ON_CALL_SHIFT,
+ LABEL_ON_CALL_SHIFT_CREATED,
+ LABEL_ON_CALL_SHIFT_DELETED,
+ LABEL_ON_CALL_SHIFT_CHANGED,
+ LABEL_USER,
+ LABEL_USER_CREATED,
+ LABEL_USER_SETTINGS_CHANGED,
+ LABEL_ORGANIZATION_SETTINGS_CHANGED,
+ LABEL_TELEGRAM_TO_USER_CONNECTED,
+ LABEL_TELEGRAM_FROM_USER_DISCONNECTED,
+ LABEL_API_TOKEN,
+ LABEL_API_TOKEN_CREATED,
+ LABEL_API_TOKEN_REVOKED,
+ LABEL_ESCALATION_CHAIN_COPIED,
+ LABEL_SCHEDULE_EXPORT_TOKEN,
+ LABEL_MESSAGING_BACKEND_CHANNEL_CHANGED,
+ LABEL_MESSAGING_BACKEND_CHANNEL_DELETED,
+ LABEL_MESSAGING_BACKEND_USER_DISCONNECTED,
+ ]
+
+ LABELS_FOR_TYPE = {
+ OrganizationLogType.TYPE_SLACK_DEFAULT_CHANNEL_CHANGED: [LABEL_SLACK, LABEL_DEFAULT_CHANNEL],
+ OrganizationLogType.TYPE_SLACK_WORKSPACE_CONNECTED: [LABEL_SLACK, LABEL_SLACK_WORKSPACE_CONNECTED],
+ OrganizationLogType.TYPE_SLACK_WORKSPACE_DISCONNECTED: [LABEL_SLACK, LABEL_SLACK_WORKSPACE_DISCONNECTED],
+ OrganizationLogType.TYPE_TELEGRAM_DEFAULT_CHANNEL_CHANGED: [LABEL_TELEGRAM, LABEL_DEFAULT_CHANNEL],
+ OrganizationLogType.TYPE_TELEGRAM_CHANNEL_CONNECTED: [LABEL_TELEGRAM, LABEL_TELEGRAM_CHANNEL_CONNECTED],
+ OrganizationLogType.TYPE_TELEGRAM_CHANNEL_DISCONNECTED: [LABEL_TELEGRAM, LABEL_TELEGRAM_CHANNEL_DISCONNECTED],
+ OrganizationLogType.TYPE_INTEGRATION_CREATED: [LABEL_INTEGRATION, LABEL_INTEGRATION_CREATED],
+ OrganizationLogType.TYPE_INTEGRATION_DELETED: [LABEL_INTEGRATION, LABEL_INTEGRATION_DELETED],
+ OrganizationLogType.TYPE_INTEGRATION_CHANGED: [LABEL_INTEGRATION, LABEL_INTEGRATION_CHANGED],
+ OrganizationLogType.TYPE_HEARTBEAT_CREATED: [LABEL_INTEGRATION_HEARTBEAT, LABEL_INTEGRATION_HEARTBEAT_CREATED],
+ OrganizationLogType.TYPE_HEARTBEAT_CHANGED: [LABEL_INTEGRATION_HEARTBEAT, LABEL_INTEGRATION_HEARTBEAT_CHANGED],
+ OrganizationLogType.TYPE_CHANNEL_FILTER_CREATED: [LABEL_CHANNEL_FILTER, LABEL_CHANNEL_FILTER_CREATED],
+ OrganizationLogType.TYPE_CHANNEL_FILTER_DELETED: [LABEL_CHANNEL_FILTER, LABEL_CHANNEL_FILTER_DELETED],
+ OrganizationLogType.TYPE_CHANNEL_FILTER_CHANGED: [LABEL_CHANNEL_FILTER, LABEL_CHANNEL_FILTER_CHANGED],
+ OrganizationLogType.TYPE_ESCALATION_CHAIN_CREATED: [LABEL_ESCALATION_CHAIN, LABEL_ESCALATION_CHAIN_CREATED],
+ OrganizationLogType.TYPE_ESCALATION_CHAIN_DELETED: [LABEL_ESCALATION_CHAIN, LABEL_ESCALATION_CHAIN_DELETED],
+ OrganizationLogType.TYPE_ESCALATION_CHAIN_CHANGED: [LABEL_ESCALATION_CHAIN, LABEL_ESCALATION_CHAIN_CHANGED],
+ OrganizationLogType.TYPE_ESCALATION_STEP_CREATED: [LABEL_ESCALATION_POLICY, LABEL_ESCALATION_POLICY_CREATED],
+ OrganizationLogType.TYPE_ESCALATION_STEP_DELETED: [LABEL_ESCALATION_POLICY, LABEL_ESCALATION_POLICY_DELETED],
+ OrganizationLogType.TYPE_ESCALATION_STEP_CHANGED: [LABEL_ESCALATION_POLICY, LABEL_ESCALATION_POLICY_CHANGED],
+ OrganizationLogType.TYPE_MAINTENANCE_STARTED_FOR_ORGANIZATION: [
+ LABEL_MAINTENANCE,
+ LABEL_MAINTENANCE_STARTED,
+ LABEL_ORGANIZATION,
+ ],
+ OrganizationLogType.TYPE_MAINTENANCE_STARTED_FOR_INTEGRATION: [
+ LABEL_MAINTENANCE,
+ LABEL_MAINTENANCE_STARTED,
+ LABEL_INTEGRATION,
+ ],
+ OrganizationLogType.TYPE_MAINTENANCE_STOPPED_FOR_ORGANIZATION: [
+ LABEL_MAINTENANCE,
+ LABEL_MAINTENANCE_STOPPED,
+ LABEL_ORGANIZATION,
+ ],
+ OrganizationLogType.TYPE_MAINTENANCE_STOPPED_FOR_INTEGRATION: [
+ LABEL_MAINTENANCE,
+ LABEL_MAINTENANCE_STOPPED,
+ LABEL_INTEGRATION,
+ ],
+ OrganizationLogType.TYPE_MAINTENANCE_DEBUG_STARTED_FOR_ORGANIZATION: [
+ LABEL_DEBUG,
+ LABEL_DEBUG_STARTED,
+ LABEL_ORGANIZATION,
+ ],
+ OrganizationLogType.TYPE_MAINTENANCE_DEBUG_STARTED_FOR_INTEGRATION: [
+ LABEL_DEBUG,
+ LABEL_DEBUG_STARTED,
+ LABEL_INTEGRATION,
+ ],
+ OrganizationLogType.TYPE_MAINTENANCE_DEBUG_STOPPED_FOR_ORGANIZATION: [
+ LABEL_DEBUG,
+ LABEL_DEBUG_STOPPED,
+ LABEL_ORGANIZATION,
+ ],
+ OrganizationLogType.TYPE_MAINTENANCE_DEBUG_STOPPED_FOR_INTEGRATION: [
+ LABEL_DEBUG,
+ LABEL_DEBUG_STOPPED,
+ LABEL_INTEGRATION,
+ ],
+ OrganizationLogType.TYPE_CUSTOM_ACTION_CREATED: [LABEL_CUSTOM_ACTION, LABEL_CUSTOM_ACTION_CREATED],
+ OrganizationLogType.TYPE_CUSTOM_ACTION_DELETED: [LABEL_CUSTOM_ACTION, LABEL_CUSTOM_ACTION_DELETED],
+ OrganizationLogType.TYPE_CUSTOM_ACTION_CHANGED: [LABEL_CUSTOM_ACTION, LABEL_CUSTOM_ACTION_CHANGED],
+ OrganizationLogType.TYPE_SCHEDULE_CREATED: [LABEL_SCHEDULE, LABEL_SCHEDULE_CREATED],
+ OrganizationLogType.TYPE_SCHEDULE_DELETED: [LABEL_SCHEDULE, LABEL_SCHEDULE_DELETED],
+ OrganizationLogType.TYPE_SCHEDULE_CHANGED: [LABEL_SCHEDULE, LABEL_SCHEDULE_CHANGED],
+ OrganizationLogType.TYPE_ON_CALL_SHIFT_CREATED: [LABEL_ON_CALL_SHIFT, LABEL_ON_CALL_SHIFT_CREATED],
+ OrganizationLogType.TYPE_ON_CALL_SHIFT_DELETED: [LABEL_ON_CALL_SHIFT, LABEL_ON_CALL_SHIFT_DELETED],
+ OrganizationLogType.TYPE_ON_CALL_SHIFT_CHANGED: [LABEL_ON_CALL_SHIFT, LABEL_ON_CALL_SHIFT_CHANGED],
+ OrganizationLogType.TYPE_NEW_USER_ADDED: [LABEL_USER, LABEL_USER_CREATED],
+ OrganizationLogType.TYPE_ORGANIZATION_SETTINGS_CHANGED: [
+ LABEL_ORGANIZATION,
+ LABEL_ORGANIZATION_SETTINGS_CHANGED,
+ ],
+ OrganizationLogType.TYPE_USER_SETTINGS_CHANGED: [LABEL_USER, LABEL_USER_SETTINGS_CHANGED],
+ OrganizationLogType.TYPE_TELEGRAM_TO_USER_CONNECTED: [LABEL_TELEGRAM, LABEL_TELEGRAM_TO_USER_CONNECTED],
+ OrganizationLogType.TYPE_TELEGRAM_FROM_USER_DISCONNECTED: [
+ LABEL_TELEGRAM,
+ LABEL_TELEGRAM_FROM_USER_DISCONNECTED,
+ ],
+ OrganizationLogType.TYPE_API_TOKEN_CREATED: [LABEL_API_TOKEN, LABEL_API_TOKEN_CREATED],
+ OrganizationLogType.TYPE_API_TOKEN_REVOKED: [LABEL_API_TOKEN, LABEL_API_TOKEN_REVOKED],
+ OrganizationLogType.TYPE_ESCALATION_CHAIN_COPIED: [LABEL_ESCALATION_CHAIN, LABEL_ESCALATION_CHAIN_COPIED],
+ OrganizationLogType.TYPE_SCHEDULE_EXPORT_TOKEN_CREATED: [
+ LABEL_SCHEDULE_EXPORT_TOKEN,
+ LABEL_SCHEDULE_EXPORT_TOKEN_CREATED,
+ ],
+ OrganizationLogType.TYPE_MESSAGING_BACKEND_CHANNEL_CHANGED: [LABEL_MESSAGING_BACKEND_CHANNEL_CHANGED],
+ OrganizationLogType.TYPE_MESSAGING_BACKEND_CHANNEL_DELETED: [LABEL_MESSAGING_BACKEND_CHANNEL_DELETED],
+ OrganizationLogType.TYPE_MESSAGING_BACKEND_USER_DISCONNECTED: [LABEL_MESSAGING_BACKEND_USER_DISCONNECTED],
+ }
+
+ public_primary_key = models.CharField(
+ max_length=20,
+ validators=[MinLengthValidator(settings.PUBLIC_PRIMARY_KEY_MIN_LENGTH + 1)],
+ unique=True,
+ default=generate_public_primary_key_for_organization_log,
+ )
+
+ organization = models.ForeignKey(
+ "user_management.Organization", on_delete=models.CASCADE, related_name="log_records"
+ )
+ author = models.ForeignKey(
+ "user_management.User",
+ on_delete=models.SET_NULL,
+ related_name="team_log_records",
+ default=None,
+ null=True,
+ )
+
+ created_at = models.DateTimeField(auto_now_add=True)
+ description = models.TextField(null=True, default=None)
+ _labels = JSONField(default=list)
+
+ @property
+ def labels(self):
+ return self._labels
+
+ @staticmethod
+ def get_log_type_and_maintainable_object_verbal(maintainable_obj, mode, verbal, stopped=False):
+ AlertReceiveChannel = apps.get_model("alerts", "AlertReceiveChannel")
+ Organization = apps.get_model("user_management", "Organization")
+ object_verbal_map = {
+ AlertReceiveChannel: f"integration {emojize(verbal, use_aliases=True)}",
+ Organization: "organization",
+ }
+ if stopped:
+ log_type_map = {
+ AlertReceiveChannel: {
+ MaintainableObject.DEBUG_MAINTENANCE: OrganizationLogType.TYPE_MAINTENANCE_DEBUG_STOPPED_FOR_INTEGRATION,
+ MaintainableObject.MAINTENANCE: OrganizationLogType.TYPE_MAINTENANCE_STOPPED_FOR_INTEGRATION,
+ },
+ Organization: {
+ MaintainableObject.DEBUG_MAINTENANCE: OrganizationLogType.TYPE_MAINTENANCE_DEBUG_STOPPED_FOR_ORGANIZATION,
+ MaintainableObject.MAINTENANCE: OrganizationLogType.TYPE_MAINTENANCE_STOPPED_FOR_ORGANIZATION,
+ },
+ }
+ else:
+ log_type_map = {
+ AlertReceiveChannel: {
+ MaintainableObject.DEBUG_MAINTENANCE: OrganizationLogType.TYPE_MAINTENANCE_DEBUG_STARTED_FOR_INTEGRATION,
+ MaintainableObject.MAINTENANCE: OrganizationLogType.TYPE_MAINTENANCE_STARTED_FOR_INTEGRATION,
+ },
+ Organization: {
+ MaintainableObject.DEBUG_MAINTENANCE: OrganizationLogType.TYPE_MAINTENANCE_DEBUG_STARTED_FOR_ORGANIZATION,
+ MaintainableObject.MAINTENANCE: OrganizationLogType.TYPE_MAINTENANCE_STARTED_FOR_ORGANIZATION,
+ },
+ }
+ log_type = log_type_map[type(maintainable_obj)][mode]
+ object_verbal = object_verbal_map[type(maintainable_obj)]
+ return log_type, object_verbal
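+
+# Usage sketch: the manager derives labels from the log type, so a call like the
+# hypothetical one below stores _labels == ["schedule", "schedule created"].
+#
+#   OrganizationLogRecord.objects.create(
+#       organization=organization,
+#       author=None,
+#       type=OrganizationLogType.TYPE_SCHEDULE_CREATED,
+#       description="Schedule 'primary' was created",
+#   )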
diff --git a/engine/apps/base/models/user_notification_policy.py b/engine/apps/base/models/user_notification_policy.py
new file mode 100644
index 0000000000..fd2087f6da
--- /dev/null
+++ b/engine/apps/base/models/user_notification_policy.py
@@ -0,0 +1,293 @@
+from typing import Tuple
+
+from django.conf import settings
+from django.core.exceptions import ValidationError
+from django.core.validators import MinLengthValidator
+from django.db import models, transaction
+from django.db.models import Q, QuerySet
+from django.utils import timezone
+from ordered_model.models import OrderedModel
+
+from apps.base.messaging import get_messaging_backends
+from apps.user_management.models import User
+from apps.user_management.organization_log_creator import OrganizationLogType, create_organization_log
+from common.public_primary_keys import generate_public_primary_key, increase_public_primary_key_length
+
+
+def generate_public_primary_key_for_notification_policy():
+ prefix = "N"
+ new_public_primary_key = generate_public_primary_key(prefix)
+
+ failure_counter = 0
+ while UserNotificationPolicy.objects.filter(public_primary_key=new_public_primary_key).exists():
+ new_public_primary_key = increase_public_primary_key_length(
+ failure_counter=failure_counter, prefix=prefix, model_name="UserNotificationPolicy"
+ )
+ failure_counter += 1
+
+ return new_public_primary_key
+
+
+# base supported notification backends
+BUILT_IN_BACKENDS = (
+ "SLACK",
+ "SMS",
+ "PHONE_CALL",
+ "TELEGRAM",
+ "EMAIL",
+ "MOBILE_PUSH_GENERAL",
+ "MOBILE_PUSH_CRITICAL",
+)
+
+
+def _notification_channel_choices():
+ """Return dynamically built choices for available notification channel backends."""
+
+ # Enum containing notification channel choices on the database level.
+ # Also see NotificationChannelOptions class with more logic on notification channels.
+    # Do not remove items from this enum if you just want to disable a notification channel temporarily;
+    # use NotificationChannelOptions.AVAILABLE_FOR_USE instead.
+ supported_backends = list(BUILT_IN_BACKENDS)
+
+ for backend_id, _ in get_messaging_backends():
+ supported_backends.append(backend_id)
+
+ channels_enum = models.IntegerChoices("NotificationChannel", supported_backends, start=0)
+ return channels_enum
+
+
+_notification_channels = _notification_channel_choices()
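+
+# With no extra backends configured, the enum built above is equivalent to this sketch:
+#
+#   class NotificationChannel(models.IntegerChoices):
+#       SLACK = 0
+#       SMS = 1
+#       PHONE_CALL = 2
+#       TELEGRAM = 3
+#       EMAIL = 4
+#       MOBILE_PUSH_GENERAL = 5
+#       MOBILE_PUSH_CRITICAL = 6
+#
+# Entries from settings.EXTRA_MESSAGING_BACKENDS are appended after the built-ins.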
+
+
+def validate_channel_choice(value):
+ if value is None:
+ return
+ try:
+ _notification_channels(value)
+ except ValueError:
+ raise ValidationError("%(value)s is not a valid option", params={"value": value})
+
+
+class UserNotificationPolicyQuerySet(models.QuerySet):
+ def get_or_create_for_user(self, user: User, important: bool) -> "QuerySet[UserNotificationPolicy]":
+ with transaction.atomic():
+ User.objects.select_for_update().get(pk=user.pk)
+ return self._get_or_create_for_user(user, important)
+
+ def _get_or_create_for_user(self, user: User, important: bool) -> "QuerySet[UserNotificationPolicy]":
+ notification_policies = super().filter(user=user, important=important)
+
+ if notification_policies.exists():
+ return notification_policies
+
+ old_state = user.repr_settings_for_client_side_logging
+ if important:
+ policies = self.create_important_policies_for_user(user)
+ else:
+ policies = self.create_default_policies_for_user(user)
+
+ new_state = user.repr_settings_for_client_side_logging
+        description = f"User settings for user {user.username} were changed from:\n{old_state}\nto:\n{new_state}"
+ create_organization_log(
+ user.organization,
+ None,
+ OrganizationLogType.TYPE_USER_SETTINGS_CHANGED,
+ description,
+ )
+ return policies
+
+ def create_default_policies_for_user(self, user: User) -> "QuerySet[UserNotificationPolicy]":
+ model = self.model
+
+ policies_to_create = (
+ model(
+ user=user,
+ step=model.Step.NOTIFY,
+ notify_by=NotificationChannelOptions.DEFAULT_NOTIFICATION_CHANNEL,
+ order=0,
+ ),
+ model(user=user, step=model.Step.WAIT, wait_delay=timezone.timedelta(minutes=15), order=1),
+ model(user=user, step=model.Step.NOTIFY, notify_by=model.NotificationChannel.PHONE_CALL, order=2),
+ )
+
+ super().bulk_create(policies_to_create)
+ return user.notification_policies.filter(important=False)
+
+ def create_important_policies_for_user(self, user: User) -> "QuerySet[UserNotificationPolicy]":
+ model = self.model
+
+ policies_to_create = (
+ model(
+ user=user,
+ step=model.Step.NOTIFY,
+ notify_by=model.NotificationChannel.PHONE_CALL,
+ important=True,
+ order=0,
+ ),
+ )
+
+ super().bulk_create(policies_to_create)
+ return user.notification_policies.filter(important=True)
+
+
+class UserNotificationPolicy(OrderedModel):
+ objects = UserNotificationPolicyQuerySet.as_manager()
+ order_with_respect_to = ("user", "important")
+
+ public_primary_key = models.CharField(
+ max_length=20,
+ validators=[MinLengthValidator(settings.PUBLIC_PRIMARY_KEY_MIN_LENGTH + 1)],
+ unique=True,
+ default=generate_public_primary_key_for_notification_policy,
+ )
+
+ user = models.ForeignKey(
+ "user_management.User", on_delete=models.CASCADE, related_name="notification_policies", default=None, null=True
+ )
+
+ class Step(models.IntegerChoices):
+ WAIT = 0, "Wait"
+ NOTIFY = 1, "Notify by"
+
+ step = models.PositiveSmallIntegerField(choices=Step.choices, default=None, null=True)
+
+ NotificationChannel = _notification_channels
+ notify_by = models.PositiveSmallIntegerField(default=0, validators=[validate_channel_choice])
+
+ ONE_MINUTE = timezone.timedelta(minutes=1)
+ FIVE_MINUTES = timezone.timedelta(minutes=5)
+ FIFTEEN_MINUTES = timezone.timedelta(minutes=15)
+ THIRTY_MINUTES = timezone.timedelta(minutes=30)
+ HOUR = timezone.timedelta(minutes=60)
+
+ DURATION_CHOICES = (
+ (ONE_MINUTE, "1 min"),
+ (FIVE_MINUTES, "5 min"),
+ (FIFTEEN_MINUTES, "15 min"),
+ (THIRTY_MINUTES, "30 min"),
+ (HOUR, "60 min"),
+ )
+
+ wait_delay = models.DurationField(default=None, null=True, choices=DURATION_CHOICES)
+
+ important = models.BooleanField(default=False)
+
+ class Meta:
+ ordering = ("order",)
+
+ def __str__(self):
+ return f"{self.pk}: {self.short_verbal}"
+
+ @classmethod
+ def get_short_verbals_for_user(cls, user: User) -> Tuple[Tuple[str], Tuple[str]]:
+ is_wait_step = Q(step=cls.Step.WAIT)
+ is_wait_step_configured = Q(wait_delay__isnull=False)
+
+ policies = cls.objects.filter(Q(user=user, step__isnull=False) & (~is_wait_step | is_wait_step_configured))
+
+ default = tuple(str(policy.short_verbal) for policy in policies if policy.important is False)
+ important = tuple(str(policy.short_verbal) for policy in policies if policy.important is True)
+
+ return default, important
+
+ @property
+ def short_verbal(self) -> str:
+ if self.step == UserNotificationPolicy.Step.NOTIFY:
+ try:
+ notification_channel = self.NotificationChannel(self.notify_by)
+ except ValueError:
+ return "Not set"
+ return NotificationChannelAPIOptions.SHORT_LABELS[notification_channel]
+ elif self.step == UserNotificationPolicy.Step.WAIT:
+ if self.wait_delay is None:
+ return "0 min"
+ else:
+ return self.get_wait_delay_display()
+ else:
+ return "Not set"
+
+
+class NotificationChannelOptions:
+ """
+    NotificationChannelOptions encapsulates the logic of notification channel representation for the API and
+    public API, holds integration constraints, and contains the list of available notification channels.
+
+ To prohibit using a notification channel, remove it from AVAILABLE_FOR_USE list.
+ Note that removing a notification channel from AVAILABLE_FOR_USE removes it from API and public API,
+ but doesn't change anything in the database.
+ """
+
+ AVAILABLE_FOR_USE = [
+ UserNotificationPolicy.NotificationChannel.SLACK,
+ UserNotificationPolicy.NotificationChannel.SMS,
+ UserNotificationPolicy.NotificationChannel.PHONE_CALL,
+ UserNotificationPolicy.NotificationChannel.TELEGRAM,
+ UserNotificationPolicy.NotificationChannel.MOBILE_PUSH_GENERAL,
+ UserNotificationPolicy.NotificationChannel.MOBILE_PUSH_CRITICAL,
+ ] + [
+ getattr(UserNotificationPolicy.NotificationChannel, backend_id)
+ for backend_id, b in get_messaging_backends()
+ if b.available_for_use
+ ]
+
+ DEFAULT_NOTIFICATION_CHANNEL = UserNotificationPolicy.NotificationChannel.SLACK
+
+ SLACK_INTEGRATION_REQUIRED_NOTIFICATION_CHANNELS = [UserNotificationPolicy.NotificationChannel.SLACK]
+ TELEGRAM_INTEGRATION_REQUIRED_NOTIFICATION_CHANNELS = [UserNotificationPolicy.NotificationChannel.TELEGRAM]
+ EMAIL_INTEGRATION_REQUIRED_NOTIFICATION_CHANNELS = [UserNotificationPolicy.NotificationChannel.EMAIL]
+ MOBILE_APP_INTEGRATION_REQUIRED_NOTIFICATION_CHANNELS = [
+ UserNotificationPolicy.NotificationChannel.MOBILE_PUSH_GENERAL,
+ UserNotificationPolicy.NotificationChannel.MOBILE_PUSH_CRITICAL,
+ ]
+
+
+class NotificationChannelAPIOptions(NotificationChannelOptions):
+ LABELS = {
+ UserNotificationPolicy.NotificationChannel.SLACK: "Slack mentions",
+ UserNotificationPolicy.NotificationChannel.SMS: "SMS \U00002709\U0001F4F2",
+ UserNotificationPolicy.NotificationChannel.PHONE_CALL: "Phone call \U0000260E",
+ UserNotificationPolicy.NotificationChannel.TELEGRAM: "Telegram \U0001F916",
+ UserNotificationPolicy.NotificationChannel.EMAIL: "Email \U0001F4E8",
+ UserNotificationPolicy.NotificationChannel.MOBILE_PUSH_GENERAL: "Mobile App",
+ UserNotificationPolicy.NotificationChannel.MOBILE_PUSH_CRITICAL: "Mobile App Critical",
+ }
+ LABELS.update(
+ {
+ getattr(UserNotificationPolicy.NotificationChannel, backend_id): b.label
+ for backend_id, b in get_messaging_backends()
+ }
+ )
+
+ SHORT_LABELS = {
+ UserNotificationPolicy.NotificationChannel.SLACK: "Slack",
+ UserNotificationPolicy.NotificationChannel.SMS: "SMS",
+ UserNotificationPolicy.NotificationChannel.PHONE_CALL: "\U0000260E",
+ UserNotificationPolicy.NotificationChannel.TELEGRAM: "Telegram",
+ UserNotificationPolicy.NotificationChannel.EMAIL: "Email",
+ UserNotificationPolicy.NotificationChannel.MOBILE_PUSH_GENERAL: "Mobile App",
+ UserNotificationPolicy.NotificationChannel.MOBILE_PUSH_CRITICAL: "Mobile App Critical",
+ }
+ SHORT_LABELS.update(
+ {
+ getattr(UserNotificationPolicy.NotificationChannel, backend_id): b.short_label
+ for backend_id, b in get_messaging_backends()
+ }
+ )
+
+
+class NotificationChannelPublicAPIOptions(NotificationChannelAPIOptions):
+ LABELS = {
+ UserNotificationPolicy.NotificationChannel.SLACK: "notify_by_slack",
+ UserNotificationPolicy.NotificationChannel.SMS: "notify_by_sms",
+ UserNotificationPolicy.NotificationChannel.PHONE_CALL: "notify_by_phone_call",
+ UserNotificationPolicy.NotificationChannel.TELEGRAM: "notify_by_telegram",
+ UserNotificationPolicy.NotificationChannel.EMAIL: "notify_by_email",
+ UserNotificationPolicy.NotificationChannel.MOBILE_PUSH_GENERAL: "notify_by_mobile_app",
+ UserNotificationPolicy.NotificationChannel.MOBILE_PUSH_CRITICAL: "notify_by_mobile_app_critical",
+ }
+ LABELS.update(
+ {
+ getattr(UserNotificationPolicy.NotificationChannel, backend_id): "notify_by_{}".format(b.backend_id.lower())
+ for backend_id, b in get_messaging_backends()
+ }
+ )
diff --git a/engine/apps/base/models/user_notification_policy_log_record.py b/engine/apps/base/models/user_notification_policy_log_record.py
new file mode 100644
index 0000000000..93fd082086
--- /dev/null
+++ b/engine/apps/base/models/user_notification_policy_log_record.py
@@ -0,0 +1,318 @@
+import logging
+
+import humanize
+from django.db import models
+from django.db.models.signals import post_save
+from django.dispatch import receiver
+from django.utils.functional import cached_property
+from rest_framework.fields import DateTimeField
+
+from apps.alerts.tasks import send_update_log_report_signal
+from apps.alerts.utils import render_relative_timeline
+from apps.base.messaging import get_messaging_backend_from_id
+from apps.base.models import UserNotificationPolicy
+from apps.base.models.user_notification_policy import validate_channel_choice
+from apps.slack.slack_formatter import SlackFormatter
+from common.utils import clean_markup
+
+logger = logging.getLogger(__name__)
+logger.setLevel(logging.DEBUG)
+
+
+class UserNotificationPolicyLogRecord(models.Model):
+
+ (
+ TYPE_PERSONAL_NOTIFICATION_TRIGGERED,
+ TYPE_PERSONAL_NOTIFICATION_FINISHED,
+ TYPE_PERSONAL_NOTIFICATION_SUCCESS,
+ TYPE_PERSONAL_NOTIFICATION_FAILED,
+ ) = range(4)
+
+ TYPE_TO_HANDLERS_MAP = {
+ TYPE_PERSONAL_NOTIFICATION_TRIGGERED: "triggered",
+ TYPE_PERSONAL_NOTIFICATION_FINISHED: "finished",
+ TYPE_PERSONAL_NOTIFICATION_SUCCESS: "success",
+ TYPE_PERSONAL_NOTIFICATION_FAILED: "failed",
+ }
+
+ TYPE_CHOICES = (
+ (TYPE_PERSONAL_NOTIFICATION_TRIGGERED, "Personal notification triggered"),
+ (TYPE_PERSONAL_NOTIFICATION_FINISHED, "Personal notification finished"),
+ (TYPE_PERSONAL_NOTIFICATION_SUCCESS, "Personal notification success"),
+ (TYPE_PERSONAL_NOTIFICATION_FAILED, "Personal notification failed"),
+ )
+
+ (
+ ERROR_NOTIFICATION_NOT_ABLE_TO_SEND_SMS,
+ ERROR_NOTIFICATION_SMS_LIMIT_EXCEEDED,
+ ERROR_NOTIFICATION_NOT_ABLE_TO_CALL,
+ ERROR_NOTIFICATION_PHONE_CALLS_LIMIT_EXCEEDED,
+ ERROR_NOTIFICATION_PHONE_NUMBER_IS_NOT_VERIFIED,
+ ERROR_NOTIFICATION_NOT_ABLE_TO_SEND_MAIL,
+ ERROR_NOTIFICATION_MAIL_LIMIT_EXCEEDED,
+ ERROR_NOTIFICATION_EMAIL_IS_NOT_VERIFIED,
+ ERROR_NOTIFICATION_TELEGRAM_IS_NOT_LINKED_TO_SLACK_ACC,
+ ERROR_NOTIFICATION_PHONE_CALL_LINE_BUSY,
+ ERROR_NOTIFICATION_PHONE_CALL_FAILED,
+ ERROR_NOTIFICATION_PHONE_CALL_NO_ANSWER,
+ ERROR_NOTIFICATION_SMS_DELIVERY_FAILED,
+ ERROR_NOTIFICATION_MAIL_DELIVERY_FAILED,
+ ERROR_NOTIFICATION_TELEGRAM_BOT_IS_DELETED,
+ ERROR_NOTIFICATION_POSTING_TO_SLACK_IS_DISABLED,
+ ERROR_NOTIFICATION_POSTING_TO_TELEGRAM_IS_DISABLED,
+ ERROR_NOTIFICATION_IN_SLACK,
+ ERROR_NOTIFICATION_IN_SLACK_TOKEN_ERROR,
+ ERROR_NOTIFICATION_IN_SLACK_USER_NOT_IN_SLACK,
+ ERROR_NOTIFICATION_IN_SLACK_USER_NOT_IN_CHANNEL,
+ ERROR_NOTIFICATION_TELEGRAM_TOKEN_ERROR,
+ ERROR_NOTIFICATION_IN_SLACK_CHANNEL_IS_ARCHIVED,
+ ERROR_NOTIFICATION_IN_SLACK_RATELIMIT,
+ ERROR_NOTIFICATION_MESSAGING_BACKEND_ERROR,
+ ) = range(25)
+
+    # for these errors we want to send a message to the general log channel
+ ERRORS_TO_SEND_IN_SLACK_CHANNEL = [
+ ERROR_NOTIFICATION_SMS_LIMIT_EXCEEDED,
+ ERROR_NOTIFICATION_PHONE_CALLS_LIMIT_EXCEEDED,
+ ERROR_NOTIFICATION_MAIL_LIMIT_EXCEEDED,
+ ERROR_NOTIFICATION_PHONE_NUMBER_IS_NOT_VERIFIED,
+ ERROR_NOTIFICATION_EMAIL_IS_NOT_VERIFIED,
+ ]
+
+ type = models.IntegerField(choices=TYPE_CHOICES)
+ author = models.ForeignKey(
+ "user_management.User",
+ on_delete=models.SET_NULL,
+ related_name="personal_log_records",
+ default=None,
+ null=True,
+ )
+
+ # TODO: soft delete notifications_policies -> change SET_NULL to Protect
+
+ notification_policy = models.ForeignKey(
+ "base.UserNotificationPolicy", on_delete=models.SET_NULL, related_name="personal_log_records", null=True
+ )
+
+ alert_group = models.ForeignKey(
+ "alerts.AlertGroup",
+ on_delete=models.CASCADE,
+ related_name="personal_log_records",
+ )
+
+ slack_prevent_posting = models.BooleanField(default=False)
+
+ created_at = models.DateTimeField(auto_now_add=True)
+
+ reason = models.TextField(null=True, default=None)
+
+ notification_error_code = models.PositiveIntegerField(null=True, default=None)
+ notification_step = models.IntegerField(choices=UserNotificationPolicy.Step.choices, null=True, default=None)
+ notification_channel = models.IntegerField(validators=[validate_channel_choice], null=True, default=None)
+
+ def rendered_notification_log_line(self, for_slack=False, html=False):
+ timeline = render_relative_timeline(self.created_at, self.alert_group.started_at)
+
+ if html:
+ result = f"{timeline}: "
+ else:
+ result = f"*{timeline}:* "
+
+ result += self.render_log_line_action(for_slack=for_slack)
+ return result
+
+ @cached_property
+ def rendered_notification_log_line_json(self):
+ time = humanize.naturaldelta(self.alert_group.started_at - self.created_at)
+ created_at = DateTimeField().to_representation(self.created_at)
+ author = self.author.short() if self.author is not None else None
+
+ sf = SlackFormatter(self.alert_group.channel.organization)
+ action = sf.format(self.render_log_line_action(substitute_author_with_tag=True))
+ action = clean_markup(action)
+
+ result = {
+ "time": time,
+ "action": action,
+ "realm": "user_notification",
+ "type": self.type,
+ "created_at": created_at,
+ "author": author,
+ }
+ return result
+
+ def render_log_line_action(self, for_slack=False, substitute_author_with_tag=False):
+ result = ""
+
+ if self.notification_step is not None:
+ notification_step = self.notification_step
+ elif self.notification_policy is not None:
+ notification_step = self.notification_policy.step
+ else:
+ notification_step = None
+
+ if self.notification_channel is not None:
+ notification_channel = self.notification_channel
+ elif self.notification_policy is not None:
+ notification_channel = self.notification_policy.notify_by
+ else:
+ notification_channel = None
+
+ if substitute_author_with_tag:
+ user_verbal = "{{author}}"
+ elif for_slack:
+ user_verbal = self.author.get_user_verbal_for_team_for_slack()
+ else:
+ user_verbal = self.author.username
+
+ if self.type == UserNotificationPolicyLogRecord.TYPE_PERSONAL_NOTIFICATION_SUCCESS:
+ if notification_channel == UserNotificationPolicy.NotificationChannel.SMS:
+ result += f"SMS to {user_verbal} was delivered successfully"
+ elif notification_channel == UserNotificationPolicy.NotificationChannel.PHONE_CALL:
+ result += f"phone call to {user_verbal} was successful"
+ # TODO: restore email notifications
+ # elif notification_channel == UserNotificationPolicy.NotificationChannel.EMAIL:
+ # result += f"email to {user_verbal} was delivered successfully"
+ elif notification_channel is None:
+ result += f"notification to {user_verbal} was delivered successfully"
+ elif self.type == UserNotificationPolicyLogRecord.TYPE_PERSONAL_NOTIFICATION_FAILED:
+ if self.notification_error_code == UserNotificationPolicyLogRecord.ERROR_NOTIFICATION_SMS_LIMIT_EXCEEDED:
+                result += f"attempt to send an SMS to {user_verbal} failed due to the plan limit"
+ elif (
+ self.notification_error_code
+ == UserNotificationPolicyLogRecord.ERROR_NOTIFICATION_PHONE_CALLS_LIMIT_EXCEEDED
+ ):
+                result += f"attempt to call {user_verbal} failed due to the plan limit"
+ elif self.notification_error_code == UserNotificationPolicyLogRecord.ERROR_NOTIFICATION_MAIL_LIMIT_EXCEEDED:
+                result += f"failed to send an email to {user_verbal}. Exceeded the limit for emails"
+ elif (
+ self.notification_error_code
+ == UserNotificationPolicyLogRecord.ERROR_NOTIFICATION_PHONE_NUMBER_IS_NOT_VERIFIED
+ ):
+ if notification_channel == UserNotificationPolicy.NotificationChannel.SMS:
+ result += f"failed to send an SMS to {user_verbal}. Phone number is not verified"
+ elif notification_channel == UserNotificationPolicy.NotificationChannel.PHONE_CALL:
+                    result += f"failed to call {user_verbal}. Phone number is not verified"
+ elif notification_channel is None:
+ result += f"failed to notify {user_verbal}. Phone number is not verified"
+            elif (
+                self.notification_error_code
+                == UserNotificationPolicyLogRecord.ERROR_NOTIFICATION_NOT_ABLE_TO_SEND_SMS
+            ):
+ result += f"Amixr was not able to send an SMS to {user_verbal}"
+ elif self.notification_error_code == UserNotificationPolicyLogRecord.ERROR_NOTIFICATION_NOT_ABLE_TO_CALL:
+                result += f"Amixr was not able to call {user_verbal}"
+ elif (
+ self.notification_error_code == UserNotificationPolicyLogRecord.ERROR_NOTIFICATION_NOT_ABLE_TO_SEND_MAIL
+ ):
+ result += f"Amixr was not able to send an email to {user_verbal}"
+ elif (
+ self.notification_error_code
+ == UserNotificationPolicyLogRecord.ERROR_NOTIFICATION_POSTING_TO_SLACK_IS_DISABLED
+ ):
+ result += f"failed to notify {user_verbal} in Slack, because the incident is not posted to Slack (reason: Slack is disabled for the route)"
+ elif (
+ self.notification_error_code
+ == UserNotificationPolicyLogRecord.ERROR_NOTIFICATION_POSTING_TO_TELEGRAM_IS_DISABLED
+ ):
+ result += f"failed to notify {user_verbal} in Telegram, because the incident is not posted to Telegram (reason: Telegram is disabled for the route)"
+ elif (
+ self.notification_error_code
+ == UserNotificationPolicyLogRecord.ERROR_NOTIFICATION_TELEGRAM_IS_NOT_LINKED_TO_SLACK_ACC
+ ):
+                result += f"failed to send telegram message to {user_verbal}, because the user doesn't have a Telegram account linked"
+ elif (
+ self.notification_error_code
+ == UserNotificationPolicyLogRecord.ERROR_NOTIFICATION_TELEGRAM_BOT_IS_DELETED
+ ):
+                result += f"failed to send telegram message to {user_verbal}, because the user deleted/stopped the bot"
+ elif (
+ self.notification_error_code == UserNotificationPolicyLogRecord.ERROR_NOTIFICATION_TELEGRAM_TOKEN_ERROR
+ ):
+ result += f"failed to send telegram message to {user_verbal} due to invalid Telegram token"
+ elif (
+ self.notification_error_code == UserNotificationPolicyLogRecord.ERROR_NOTIFICATION_PHONE_CALL_LINE_BUSY
+ ):
+ result += f"phone call to {user_verbal} failed, because the line was busy"
+ elif self.notification_error_code == UserNotificationPolicyLogRecord.ERROR_NOTIFICATION_PHONE_CALL_FAILED:
+ result += f"phone call to {user_verbal} failed, most likely because the phone number was non-existent"
+ elif (
+ self.notification_error_code == UserNotificationPolicyLogRecord.ERROR_NOTIFICATION_PHONE_CALL_NO_ANSWER
+ ):
+ result += f"phone call to {user_verbal} ended without being answered"
+ elif self.notification_error_code == UserNotificationPolicyLogRecord.ERROR_NOTIFICATION_SMS_DELIVERY_FAILED:
+                result += f"SMS to {user_verbal} was not delivered"
+ elif (
+ self.notification_error_code == UserNotificationPolicyLogRecord.ERROR_NOTIFICATION_MAIL_DELIVERY_FAILED
+ ):
+ result += f"email to {user_verbal} was not delivered"
+ elif self.notification_error_code == UserNotificationPolicyLogRecord.ERROR_NOTIFICATION_IN_SLACK:
+ result += f"failed to notify {user_verbal} in Slack"
+ elif (
+ self.notification_error_code == UserNotificationPolicyLogRecord.ERROR_NOTIFICATION_IN_SLACK_TOKEN_ERROR
+ ):
+ result += f"failed to notify {user_verbal} in Slack, because Slack Integration is not installed"
+ elif (
+ self.notification_error_code
+ == UserNotificationPolicyLogRecord.ERROR_NOTIFICATION_IN_SLACK_USER_NOT_IN_SLACK
+ ):
+ result += f"failed to notify {user_verbal} in Slack, because {user_verbal} is not in Slack"
+ elif (
+ self.notification_error_code
+ == UserNotificationPolicyLogRecord.ERROR_NOTIFICATION_IN_SLACK_USER_NOT_IN_CHANNEL
+ ):
+                result += f"failed to notify {user_verbal} in Slack, because {user_verbal} is not in the channel"
+ elif (
+ self.notification_error_code
+ == UserNotificationPolicyLogRecord.ERROR_NOTIFICATION_IN_SLACK_CHANNEL_IS_ARCHIVED
+ ):
+                result += f"failed to notify {user_verbal} in Slack, because the channel is archived"
+ elif self.notification_error_code == UserNotificationPolicyLogRecord.ERROR_NOTIFICATION_IN_SLACK_RATELIMIT:
+ result += f"failed to notify {user_verbal} in Slack due to Slack rate limit"
+ else:
+ # TODO: handle specific backend errors
+ try:
+ backend_id = UserNotificationPolicy.NotificationChannel(notification_channel).name
+ backend = get_messaging_backend_from_id(backend_id)
+ except ValueError:
+ backend = None
+ result += (
+ f"failed to notify {user_verbal} in {backend.label.lower() if backend else 'disabled backend'}"
+ )
+ elif self.type == UserNotificationPolicyLogRecord.TYPE_PERSONAL_NOTIFICATION_TRIGGERED:
+ if notification_step == UserNotificationPolicy.Step.NOTIFY:
+ if notification_channel == UserNotificationPolicy.NotificationChannel.SLACK:
+ result += f"invited {user_verbal} in Slack"
+ elif notification_channel == UserNotificationPolicy.NotificationChannel.SMS:
+ result += f"sent sms to {user_verbal}"
+ elif notification_channel == UserNotificationPolicy.NotificationChannel.PHONE_CALL:
+ result += f"called {user_verbal} by phone"
+ elif notification_channel == UserNotificationPolicy.NotificationChannel.TELEGRAM:
+ result += f"sent telegram message to {user_verbal}"
+ # TODO: restore email notifications
+ # elif notification_channel == UserNotificationPolicy.NotificationChannel.EMAIL:
+ # result += f"sent email to {user_verbal}"
+ elif notification_channel == UserNotificationPolicy.NotificationChannel.MOBILE_PUSH_GENERAL:
+ result += f"sent push notifications to {user_verbal}"
+ elif notification_channel == UserNotificationPolicy.NotificationChannel.MOBILE_PUSH_CRITICAL:
+                    result += f"sent critical push notifications to {user_verbal}"
+ elif notification_channel is None:
+                    result += f"invited {user_verbal}, but the notification channel is unspecified"
+ else:
+ try:
+ backend_id = UserNotificationPolicy.NotificationChannel(notification_channel).name
+ backend = get_messaging_backend_from_id(backend_id)
+ except ValueError:
+ backend = None
+ result += f"sent {backend.label.lower() if backend else ''} message to {user_verbal}"
+ elif notification_step is None:
+ result += f"escalation triggered for {user_verbal}"
+ return result
+
+
+@receiver(post_save, sender=UserNotificationPolicyLogRecord)
+def listen_for_usernotificationpolicylogrecord_model_save(sender, instance, created, *args, **kwargs):
+ alert_group_pk = instance.alert_group.drop_cached_after_resolve_report_json()
+ if instance.type != UserNotificationPolicyLogRecord.TYPE_PERSONAL_NOTIFICATION_FINISHED:
+ logger.debug(
+ f"send_update_log_report_signal for alert_group {alert_group_pk}, "
+ f"user notification event: {instance.get_type_display()}"
+ )
+ send_update_log_report_signal.apply_async(kwargs={"alert_group_pk": alert_group_pk}, countdown=10)
diff --git a/engine/apps/base/tasks.py b/engine/apps/base/tasks.py
new file mode 100644
index 0000000000..d73e50c4b4
--- /dev/null
+++ b/engine/apps/base/tasks.py
@@ -0,0 +1,30 @@
+from django.db import transaction
+
+from apps.base.models import FailedToInvokeCeleryTask
+from common.custom_celery_tasks import shared_dedicated_queue_retry_task
+from common.utils import batch_queryset
+
+
+@shared_dedicated_queue_retry_task
+def process_failed_to_invoke_celery_tasks():
+ task_pks = FailedToInvokeCeleryTask.objects.filter(is_sent=False).values_list("pk", flat=True)
+
+ batches = batch_queryset(task_pks)
+ for idx, batch in enumerate(batches):
+ countdown = idx * 60
+ process_failed_to_invoke_celery_tasks_batch.apply_async((list(batch),), countdown=countdown)
+
+
+@shared_dedicated_queue_retry_task
+def process_failed_to_invoke_celery_tasks_batch(task_pks):
+ sent_task_pks = []
+ with transaction.atomic():
+ for task in FailedToInvokeCeleryTask.objects.filter(pk__in=task_pks, is_sent=False).select_for_update():
+ try:
+ task.send()
+ except Exception:
+ continue
+
+ sent_task_pks.append(task.pk)
+
+ FailedToInvokeCeleryTask.objects.filter(pk__in=sent_task_pks).update(is_sent=True)
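+
+# Scheduling sketch (hypothetical, not part of this module): the retry task is meant
+# to run periodically, e.g. via a celery beat entry along these lines:
+#
+#   CELERY_BEAT_SCHEDULE = {
+#       "process_failed_to_invoke_celery_tasks": {
+#           "task": "apps.base.tasks.process_failed_to_invoke_celery_tasks",
+#           "schedule": 60 * 10,  # every 10 minutes
+#       },
+#   }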
diff --git a/engine/apps/base/tests/__init__.py b/engine/apps/base/tests/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/engine/apps/base/tests/factories.py b/engine/apps/base/tests/factories.py
new file mode 100644
index 0000000000..d65496fbb2
--- /dev/null
+++ b/engine/apps/base/tests/factories.py
@@ -0,0 +1,25 @@
+import factory
+
+from apps.base.models import LiveSetting, OrganizationLogRecord, UserNotificationPolicy, UserNotificationPolicyLogRecord
+
+
+class UserNotificationPolicyFactory(factory.DjangoModelFactory):
+ class Meta:
+ model = UserNotificationPolicy
+
+
+class UserNotificationPolicyLogRecordFactory(factory.DjangoModelFactory):
+ class Meta:
+ model = UserNotificationPolicyLogRecord
+
+
+class OrganizationLogRecordFactory(factory.DjangoModelFactory):
+ description = factory.Faker("sentence", nb_words=4)
+
+ class Meta:
+ model = OrganizationLogRecord
+
+
+class LiveSettingFactory(factory.DjangoModelFactory):
+ class Meta:
+ model = LiveSetting
diff --git a/engine/apps/base/tests/messaging_backend.py b/engine/apps/base/tests/messaging_backend.py
new file mode 100644
index 0000000000..48d62dc5eb
--- /dev/null
+++ b/engine/apps/base/tests/messaging_backend.py
@@ -0,0 +1,27 @@
+from apps.alerts.incident_appearance.templaters import AlertWebTemplater
+from apps.base.messaging import BaseMessagingBackend
+
+
+class TestOnlyTemplater(AlertWebTemplater):
+ def _render_for(self):
+ return "testonly"
+
+
+class TestOnlyBackend(BaseMessagingBackend):
+ backend_id = "TESTONLY"
+ label = "Test Only Backend"
+ short_label = "Test"
+ available_for_use = True
+ templater = "apps.base.tests.messaging_backend.TestOnlyTemplater"
+
+ def generate_channel_verification_code(self, organization):
+ return "42"
+
+ def generate_user_verification_code(self, user):
+ return "42"
+
+ def serialize_user(self, user):
+ return {"user": user.username}
+
+ def notify_user(self, user, alert_group, notification_policy):
+ return
diff --git a/engine/apps/base/tests/test_live_settings.py b/engine/apps/base/tests/test_live_settings.py
new file mode 100644
index 0000000000..498be849de
--- /dev/null
+++ b/engine/apps/base/tests/test_live_settings.py
@@ -0,0 +1,72 @@
+from unittest.mock import patch
+
+import pytest
+
+from apps.base.models import LiveSetting
+from apps.base.utils import live_settings
+from apps.twilioapp.twilio_client import TwilioClient
+
+
+@pytest.mark.django_db
+def test_fallback_to_settings(settings):
+ settings.SOME_NEW_FEATURE_ENABLED = True
+
+ with patch.object(LiveSetting, "AVAILABLE_NAMES", ("SOME_NEW_FEATURE_ENABLED",)):
+ assert LiveSetting.get_setting("SOME_NEW_FEATURE_ENABLED") is True
+
+
+@pytest.mark.django_db
+def test_take_from_db(settings):
+ settings.SOME_NEW_FEATURE_ENABLED = True
+
+ with patch.object(LiveSetting, "AVAILABLE_NAMES", ("SOME_NEW_FEATURE_ENABLED",)):
+ LiveSetting.objects.create(name="SOME_NEW_FEATURE_ENABLED", value=False)
+ assert LiveSetting.get_setting("SOME_NEW_FEATURE_ENABLED") is False
+
+
+@pytest.mark.django_db
+def test_restrict_foreign_names():
+ with pytest.raises(ValueError):
+ LiveSetting.objects.create(name="SOME_NONEXISTENT_FANCY_FEATURE_ENABLED", value=42)
+
+ with pytest.raises(ValueError):
+ LiveSetting.get_setting("SOME_NONEXISTENT_FANCY_FEATURE_ENABLED")
+
+
+@pytest.mark.parametrize("value", (True, None, 12, "test string", ["hey", "there", 1]))
+@pytest.mark.django_db
+def test_multi_type_support(value):
+ with patch.object(LiveSetting, "AVAILABLE_NAMES", ("SOME_NEW_FEATURE_ENABLED",)):
+ LiveSetting.objects.create(name="SOME_NEW_FEATURE_ENABLED", value=value)
+ setting_value = LiveSetting.get_setting("SOME_NEW_FEATURE_ENABLED")
+
+ assert type(setting_value) is type(value)
+ assert setting_value == value
+
+
+@pytest.mark.django_db
+def test_live_settings_proxy(settings, monkeypatch):
+ settings.SOME_SETTING = 12
+ monkeypatch.setattr(LiveSetting, "AVAILABLE_NAMES", ("SOME_SETTING",))
+ assert live_settings.SOME_SETTING == 12
+
+ live_settings.SOME_SETTING = 42
+ assert LiveSetting.objects.get(name="SOME_SETTING").value == 42
+ assert live_settings.SOME_SETTING == 42
+
+
+@pytest.mark.django_db
+def test_twilio_respects_changed_credentials(settings):
+ settings.TWILIO_ACCOUNT_SID = "twilio_account_sid"
+ settings.TWILIO_AUTH_TOKEN = "twilio_auth_token"
+ settings.TWILIO_NUMBER = "twilio_number"
+
+ twilio_client = TwilioClient()
+
+ live_settings.TWILIO_ACCOUNT_SID = "new_twilio_account_sid"
+ live_settings.TWILIO_AUTH_TOKEN = "new_twilio_auth_token"
+ live_settings.TWILIO_NUMBER = "new_twilio_number"
+
+ assert twilio_client.twilio_api_client.username == "new_twilio_account_sid"
+ assert twilio_client.twilio_api_client.password == "new_twilio_auth_token"
+ assert twilio_client.twilio_number == "new_twilio_number"
diff --git a/engine/apps/base/tests/test_messaging.py b/engine/apps/base/tests/test_messaging.py
new file mode 100644
index 0000000000..542a8250d1
--- /dev/null
+++ b/engine/apps/base/tests/test_messaging.py
@@ -0,0 +1,19 @@
+import pytest
+
+from apps.base.messaging import get_messaging_backend_from_id, get_messaging_backends
+
+
+@pytest.mark.django_db
+def test_messaging_backends_disabled(settings):
+ settings.FEATURE_EXTRA_MESSAGING_BACKENDS_ENABLED = False
+
+ assert get_messaging_backends() == {}
+ assert get_messaging_backend_from_id("TESTONLY") is None
+
+
+@pytest.mark.django_db
+def test_messaging_backends_enabled(settings):
+ settings.FEATURE_EXTRA_MESSAGING_BACKENDS_ENABLED = True
+
+ assert get_messaging_backends() != {}
+ assert get_messaging_backend_from_id("TESTONLY") is not None
diff --git a/engine/apps/base/tests/test_organization_log_record.py b/engine/apps/base/tests/test_organization_log_record.py
new file mode 100644
index 0000000000..26d7c7b013
--- /dev/null
+++ b/engine/apps/base/tests/test_organization_log_record.py
@@ -0,0 +1,18 @@
+import pytest
+
+from apps.base.models import OrganizationLogRecord
+
+
+@pytest.mark.django_db
+def test_organization_log_set_general_log_channel(
+ make_organization_with_slack_team_identity, make_user_for_organization, make_slack_channel
+):
+ organization, slack_team_identity = make_organization_with_slack_team_identity()
+ user = make_user_for_organization(organization)
+
+ slack_channel = make_slack_channel(slack_team_identity)
+ organization.set_general_log_channel(slack_channel.slack_id, slack_channel.name, user)
+
+ assert organization.log_records.filter(
+ _labels=[OrganizationLogRecord.LABEL_SLACK, OrganizationLogRecord.LABEL_DEFAULT_CHANNEL]
+ ).exists()
diff --git a/engine/apps/base/tests/test_user_notification_policy.py b/engine/apps/base/tests/test_user_notification_policy.py
new file mode 100644
index 0000000000..5d0e1df70d
--- /dev/null
+++ b/engine/apps/base/tests/test_user_notification_policy.py
@@ -0,0 +1,82 @@
+import pytest
+from django.utils.timezone import timedelta
+
+from apps.base.models import UserNotificationPolicy
+from apps.base.models.user_notification_policy import (
+ NotificationChannelAPIOptions,
+ NotificationChannelOptions,
+ NotificationChannelPublicAPIOptions,
+ validate_channel_choice,
+)
+from apps.base.tests.messaging_backend import TestOnlyBackend
+
+
+@pytest.mark.parametrize(
+ "notification_type,kwargs, expected_verbal",
+ [
+ (
+ UserNotificationPolicy.Step.WAIT,
+ {
+ "wait_delay": timedelta(minutes=5),
+ },
+ "5 min",
+ ),
+ (UserNotificationPolicy.Step.NOTIFY, {"notify_by": UserNotificationPolicy.NotificationChannel.SLACK}, "Slack"),
+ (UserNotificationPolicy.Step.WAIT, {}, "0 min"),
+ (None, {}, "Not set"),
+ ],
+)
+@pytest.mark.django_db
+def test_short_verbal(
+ make_organization,
+ make_user_for_organization,
+ make_user_notification_policy,
+ notification_type,
+ kwargs,
+ expected_verbal,
+):
+ organization = make_organization()
+ user = make_user_for_organization(organization)
+
+ policy = make_user_notification_policy(user, notification_type, **kwargs)
+ assert policy.short_verbal == expected_verbal
+
+
+@pytest.mark.django_db
+def test_short_verbals_for_user(
+ make_organization,
+ make_user_for_organization,
+ make_user_notification_policy,
+):
+ organization = make_organization()
+ user = make_user_for_organization(organization)
+
+ make_user_notification_policy(
+ user, UserNotificationPolicy.Step.NOTIFY, notify_by=UserNotificationPolicy.NotificationChannel.SLACK
+ )
+
+ make_user_notification_policy(user, UserNotificationPolicy.Step.WAIT, wait_delay=timedelta(minutes=5))
+
+ make_user_notification_policy(
+ user,
+ UserNotificationPolicy.Step.NOTIFY,
+ notify_by=UserNotificationPolicy.NotificationChannel.SMS,
+ important=True,
+ )
+
+ expected = (("Slack", "5 min"), ("SMS",))
+ assert UserNotificationPolicy.get_short_verbals_for_user(user) == expected
+
+
+@pytest.mark.django_db
+def test_extra_messaging_backends_details():
+ assert TestOnlyBackend.backend_id in UserNotificationPolicy.NotificationChannel.names
+ assert TestOnlyBackend.backend_id not in NotificationChannelOptions.AVAILABLE_FOR_USE
+ channel_choice = getattr(UserNotificationPolicy.NotificationChannel, TestOnlyBackend.backend_id)
+ assert NotificationChannelAPIOptions.LABELS[channel_choice] == "Test Only Backend"
+ assert NotificationChannelAPIOptions.SHORT_LABELS[channel_choice] == TestOnlyBackend.short_label
+ assert NotificationChannelPublicAPIOptions.LABELS[channel_choice] == "notify_by_{}".format(
+ TestOnlyBackend.backend_id.lower()
+ )
+
+ assert validate_channel_choice(channel_choice) is None
diff --git a/engine/apps/base/tests/test_user_notification_policy_log_record.py b/engine/apps/base/tests/test_user_notification_policy_log_record.py
new file mode 100644
index 0000000000..7c8c893943
--- /dev/null
+++ b/engine/apps/base/tests/test_user_notification_policy_log_record.py
@@ -0,0 +1,63 @@
+import pytest
+
+from apps.base.models import UserNotificationPolicy, UserNotificationPolicyLogRecord
+from apps.base.tests.messaging_backend import TestOnlyBackend
+
+
+@pytest.mark.django_db
+def test_extra_messaging_backends_error_log(
+ make_organization,
+ make_user,
+ make_user_notification_policy,
+ make_alert_receive_channel,
+ make_alert_group,
+ make_user_notification_policy_log_record,
+):
+ organization = make_organization()
+ user_1 = make_user(organization=organization)
+ user_notification_policy = make_user_notification_policy(
+ user=user_1,
+ step=UserNotificationPolicy.Step.NOTIFY,
+ notify_by=UserNotificationPolicy.NotificationChannel.TESTONLY,
+ )
+ alert_receive_channel = make_alert_receive_channel(organization=organization)
+ alert_group = make_alert_group(alert_receive_channel=alert_receive_channel)
+ log_record = make_user_notification_policy_log_record(
+ author=user_1,
+ alert_group=alert_group,
+ notification_policy=user_notification_policy,
+ notification_error_code=UserNotificationPolicyLogRecord.ERROR_NOTIFICATION_MESSAGING_BACKEND_ERROR,
+ type=UserNotificationPolicyLogRecord.TYPE_PERSONAL_NOTIFICATION_FAILED,
+ )
+
+ output = log_record.render_log_line_action()
+ assert output == f"failed to notify {user_1.username} in {TestOnlyBackend.label.lower()}"
+
+
+@pytest.mark.django_db
+def test_extra_messaging_backends_sent_log(
+ make_organization,
+ make_user,
+ make_user_notification_policy,
+ make_alert_receive_channel,
+ make_alert_group,
+ make_user_notification_policy_log_record,
+):
+ organization = make_organization()
+ user_1 = make_user(organization=organization)
+ user_notification_policy = make_user_notification_policy(
+ user=user_1,
+ step=UserNotificationPolicy.Step.NOTIFY,
+ notify_by=UserNotificationPolicy.NotificationChannel.TESTONLY,
+ )
+ alert_receive_channel = make_alert_receive_channel(organization=organization)
+ alert_group = make_alert_group(alert_receive_channel=alert_receive_channel)
+ log_record = make_user_notification_policy_log_record(
+ author=user_1,
+ alert_group=alert_group,
+ notification_policy=user_notification_policy,
+ type=UserNotificationPolicyLogRecord.TYPE_PERSONAL_NOTIFICATION_TRIGGERED,
+ )
+
+ output = log_record.render_log_line_action()
+ assert output == f"sent {TestOnlyBackend.label.lower()} message to {user_1.username}"
diff --git a/engine/apps/base/utils.py b/engine/apps/base/utils.py
new file mode 100644
index 0000000000..7342d00ee1
--- /dev/null
+++ b/engine/apps/base/utils.py
@@ -0,0 +1,125 @@
+import json
+import re
+
+from django.apps import apps
+from python_http_client import UnauthorizedError
+from sendgrid import SendGridAPIClient
+from telegram import Bot
+from twilio.base.exceptions import TwilioException
+from twilio.rest import Client
+
+
+class LiveSettingProxy:
+ def __dir__(self):
+ LiveSetting = apps.get_model("base", "LiveSetting")
+ return LiveSetting.AVAILABLE_NAMES
+
+ def __getattr__(self, item):
+ LiveSetting = apps.get_model("base", "LiveSetting")
+
+ value = LiveSetting.get_setting(item)
+ return value
+
+ def __setattr__(self, key, value):
+ LiveSetting = apps.get_model("base", "LiveSetting")
+ LiveSetting.objects.update_or_create(name=key, defaults={"value": value})
+
+
+live_settings = LiveSettingProxy()
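+# A minimal usage sketch (assuming TELEGRAM_TOKEN is in LiveSetting.AVAILABLE_NAMES):
+#
+#     from apps.base.utils import live_settings
+#
+#     token = live_settings.TELEGRAM_TOKEN       # DB override wins over settings
+#     live_settings.TELEGRAM_TOKEN = "123:abc"   # persisted via update_or_create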
+
+
+class LiveSettingValidator:
+ def __init__(self, live_setting):
+ self.live_setting = live_setting
+
+ def get_error(self):
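+ # validators are looked up by naming convention: a setting named FOO is
+ # checked by a _check_foo classmethod below, if one exists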
+ check_fn_name = f"_check_{self.live_setting.name.lower()}"
+
+ if self.live_setting.value is None:
+ return "Empty"
+
+ # skip validation if there's no handler for it
+ if not hasattr(self, check_fn_name):
+ return None
+
+ check_fn = getattr(self, check_fn_name)
+ return check_fn(self.live_setting.value)
+
+ @classmethod
+ def _check_twilio_account_sid(cls, twilio_account_sid):
+ try:
+ Client(twilio_account_sid, live_settings.TWILIO_AUTH_TOKEN).api.accounts.list(limit=1)
+ except Exception as e:
+ return cls._prettify_twilio_error(e)
+
+ @classmethod
+ def _check_twilio_auth_token(cls, twilio_auth_token):
+ try:
+ Client(live_settings.TWILIO_ACCOUNT_SID, twilio_auth_token).api.accounts.list(limit=1)
+ except Exception as e:
+ return cls._prettify_twilio_error(e)
+
+ @classmethod
+ def _check_twilio_verify_service_sid(cls, twilio_verify_service_sid):
+ try:
+ twilio_client = Client(live_settings.TWILIO_ACCOUNT_SID, live_settings.TWILIO_AUTH_TOKEN)
+ twilio_client.verify.services(twilio_verify_service_sid).rate_limits.list(limit=1)
+ except Exception as e:
+ return cls._prettify_twilio_error(e)
+
+ @classmethod
+ def _check_twilio_number(cls, twilio_number):
+ if not cls._is_phone_number_valid(twilio_number):
+ return "Please specify a valid phone number in the following format: +XXXXXXXXXXX"
+
+ @classmethod
+ def _check_sendgrid_api_key(cls, sendgrid_api_key):
+ sendgrid_client = SendGridAPIClient(sendgrid_api_key)
+
+ try:
+ sendgrid_client.client.mail_settings.get()
+ except Exception as e:
+ return cls._prettify_sendgrid_error(e)
+
+ @classmethod
+ def _check_sendgrid_from_email(cls, sendgrid_from_email):
+ if not cls._is_email_valid(sendgrid_from_email):
+ return "Please specify a valid email"
+
+ @classmethod
+ def _check_telegram_token(cls, telegram_token):
+ try:
+ bot = Bot(telegram_token)
+ bot.get_me()
+ except Exception as e:
+ return f"Telegram error: {str(e)}"
+
+ @staticmethod
+ def _is_email_valid(email):
+ return re.match(r"^[^@]+@[^@]+\.[^@]+$", email)
+
+ @staticmethod
+ def _is_phone_number_valid(phone_number):
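+ # requires a leading "+" followed by exactly 11 digits, the same format
+ # shown in the _check_twilio_number error message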
+ return re.match(r"^\+\d{11}$", phone_number)
+
+ @staticmethod
+ def _prettify_twilio_error(exc):
+ if isinstance(exc, TwilioException):
+ if len(exc.args) > 1:
+ response_content = exc.args[1].content
+ content = json.loads(response_content)
+
+ error_code = content["code"]
+ more_info = content["more_info"]
+ return f"Twilio error: code {error_code}. Learn more: {more_info}"
+ else:
+ return f"Twilio error: {exc.args[0]}"
+ else:
+ return f"Twilio error: {str(exc)}"
+
+ @staticmethod
+ def _prettify_sendgrid_error(exc):
+ if isinstance(exc, UnauthorizedError):
+ return "Sendgrid error: couldn't authorize with given credentials"
+ else:
+ return f"Sendgrid error: {str(exc)}"
diff --git a/engine/apps/grafana_plugin/__init__.py b/engine/apps/grafana_plugin/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/engine/apps/grafana_plugin/helpers/__init__.py b/engine/apps/grafana_plugin/helpers/__init__.py
new file mode 100644
index 0000000000..dc55d6fd05
--- /dev/null
+++ b/engine/apps/grafana_plugin/helpers/__init__.py
@@ -0,0 +1,2 @@
+from .client import GcomAPIClient # noqa: F401
+from .client import GrafanaAPIClient # noqa: F401
diff --git a/engine/apps/grafana_plugin/helpers/client.py b/engine/apps/grafana_plugin/helpers/client.py
new file mode 100644
index 0000000000..bb4586dae0
--- /dev/null
+++ b/engine/apps/grafana_plugin/helpers/client.py
@@ -0,0 +1,139 @@
+import json
+import logging
+import time
+from typing import Any, Optional, Tuple
+from urllib.parse import urljoin
+
+import requests
+from django.conf import settings
+from rest_framework import status
+
+logger = logging.getLogger(__name__)
+
+
+class APIClient:
+ def __init__(self, api_url: str, api_token: str):
+ self.api_url = api_url
+ self.api_token = api_token
+
+ # these helpers return parsed JSON (dict or list) or None, never a DRF Response
+ def api_get(self, endpoint: str) -> Tuple[Optional[Any], dict]:
+ return self.call_api(endpoint, requests.get)
+
+ def api_post(self, endpoint: str, body: dict = None) -> Tuple[Optional[Any], dict]:
+ return self.call_api(endpoint, requests.post, body)
+
+ def call_api(self, endpoint: str, http_method, body: dict = None) -> Tuple[Optional[Any], dict]:
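+ # returns (parsed JSON, call_status): the first element is the decoded body,
+ # {} on a 204, or None on failure; call_status always carries the url, a
+ # connected flag, the HTTP status code and a human-readable message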
+ request_start = time.perf_counter()
+ call_status = {
+ "url": urljoin(self.api_url, endpoint),
+ "connected": False,
+ "status_code": status.HTTP_503_SERVICE_UNAVAILABLE,
+ "message": "",
+ }
+ try:
+ response = http_method(call_status["url"], json=body, headers=self.request_headers)
+ call_status["status_code"] = response.status_code
+ response.raise_for_status()
+
+ call_status["connected"] = True
+ call_status["message"] = response.reason
+
+ if response.status_code == status.HTTP_204_NO_CONTENT:
+ return {}, call_status
+
+ return response.json(), call_status
+ except (
+ requests.exceptions.ConnectionError,
+ requests.exceptions.HTTPError,
+ requests.exceptions.TooManyRedirects,
+ json.JSONDecodeError,
+ ) as e:
+ logger.warning("Error connecting to api instance " + str(e))
+ call_status["message"] = "{0}".format(e)
+ finally:
+ request_end = time.perf_counter()
+ status_code = call_status["status_code"]
+ url = call_status["url"]
+ seconds = request_end - request_start
+ logger.info(  # module logger, not the root logger
+ f"outbound latency={str(seconds)} status={status_code} "
+ f"method={http_method.__name__.upper()} url={url} "
+ f"slow={int(seconds > settings.SLOW_THRESHOLD_SECONDS)} "
+ )
+ return None, call_status
+
+ @property
+ def request_headers(self) -> dict:
+ return {"User-Agent": settings.GRAFANA_COM_USER_AGENT, "Authorization": f"Bearer {self.api_token}"}
+
+
+class GrafanaAPIClient(APIClient):
+ def __init__(self, api_url: str, api_token: str):
+ super().__init__(api_url, api_token)
+
+ def check_token(self) -> Tuple[Optional[Any], dict]:
+ return self.api_get("api/org")
+
+ def get_users(self) -> Tuple[Optional[Any], dict]:
+ """
+ Response example:
+ [
+ {
+ 'orgId': 1,
+ 'userId': 1,
+ 'email': 'user@example.com',
+ 'name': 'User User',
+ 'avatarUrl': '/avatar/79163f696e9e08958c0d3f73c160e2cc',
+ 'login': 'user',
+ 'role': 'Admin',
+ 'lastSeenAt': '2021-06-21T07:01:45Z',
+ 'lastSeenAtAge': '9m'
+ },
+ ]
+ """
+ return self.api_get("api/org/users")
+
+ def get_teams(self):
+ return self.api_get("api/teams/search?perpage=1000000")
+
+ def get_team_members(self, team_id):
+ return self.api_get(f"api/teams/{team_id}/members")
+
+ def get_datasources(self):
+ return self.api_get("api/datasources")
+
+ def get_datasource(self, datasource_id):
+ return self.api_get(f"api/datasources/{datasource_id}")
+
+ def get_alertmanager_status_with_config(self, recipient):
+ return self.api_get(f"api/alertmanager/{recipient}/api/v2/status")
+
+ def get_alerting_config(self, recipient):
+ return self.api_get(f"api/alertmanager/{recipient}/config/api/v1/alerts")
+
+ def update_alerting_config(self, config, recipient):
+ return self.api_post(f"api/alertmanager/{recipient}/config/api/v1/alerts", config)
+
+
+class GcomAPIClient(APIClient):
+ STACK_STATUS_DELETED = "deleted"
+
+ def __init__(self, api_token: str):
+ super().__init__(settings.GRAFANA_COM_API_URL, api_token)
+
+ def check_token(self):
+ return self.api_post("api-keys/check", {"token": self.api_token})
+
+ def get_instance_info(self, stack_id: str):
+ return self.api_get(f"instances/{stack_id}")
+
+ def get_active_instances(self):
+ return self.api_get("instances?status=active")
+
+ def is_stack_deleted(self, stack_id: str) -> bool:
+ instance_info, call_status = self.get_instance_info(stack_id)
+ return bool(instance_info and instance_info.get("status") == self.STACK_STATUS_DELETED)
+
+ def post_active_users(self, body):
+ return self.api_post("app-active-users", body)
diff --git a/engine/apps/grafana_plugin/helpers/gcom.py b/engine/apps/grafana_plugin/helpers/gcom.py
new file mode 100644
index 0000000000..702f08ded5
--- /dev/null
+++ b/engine/apps/grafana_plugin/helpers/gcom.py
@@ -0,0 +1,103 @@
+import logging
+from typing import Optional, Tuple
+
+from django.apps import apps
+from django.conf import settings
+from django.utils import timezone
+
+from apps.auth_token.exceptions import InvalidToken
+from apps.auth_token.models import PluginAuthToken
+from apps.grafana_plugin.helpers import GcomAPIClient
+from apps.user_management.models import Organization
+
+logger = logging.getLogger(__name__)
+logger.setLevel(logging.DEBUG)
+GCOM_TOKEN_CHECK_PERIOD = timezone.timedelta(minutes=60)
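+# a gcom token is re-validated against the Grafana.com API at most once per
+# GCOM_TOKEN_CHECK_PERIOD per organization; inside that window the cached
+# token stored on the organization record is trusted as-is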
+
+
+class GcomToken:
+ def __init__(self, organization):
+ self.organization = organization
+
+
+def check_gcom_permission(token_string: str, context) -> "GcomToken":
+ """
+ Verify that a request from the plugin is valid, and synchronize the organization
+ details with gcom at most once every GCOM_TOKEN_CHECK_PERIOD.
+ """
+
+ stack_id = context["stack_id"]
+ org_id = context["org_id"]
+ organization = Organization.objects.filter(stack_id=stack_id, org_id=org_id).first()
+ if (
+ organization
+ and organization.gcom_token == token_string
+ and organization.gcom_token_org_last_time_synced
+ and timezone.now() - organization.gcom_token_org_last_time_synced < GCOM_TOKEN_CHECK_PERIOD
+ ):
+ logger.debug(f"Allow request without calling gcom api for org={org_id}, stack_id={stack_id}")
+ return GcomToken(organization)
+
+ logger.debug(f"Start authenticate by making request to gcom api for org={org_id}, stack_id={stack_id}")
+ client = GcomAPIClient(token_string)
+ instance_info, status = client.get_instance_info(stack_id)
+ if not instance_info or str(instance_info["orgId"]) != org_id:
+ raise InvalidToken
+
+ if not organization:
+ DynamicSetting = apps.get_model("base", "DynamicSetting")
+ allow_signup = DynamicSetting.objects.get_or_create(
+ name="allow_plugin_organization_signup", defaults={"boolean_value": True}
+ )[0].boolean_value
+ if allow_signup:
+ organization = Organization.objects.create(
+ stack_id=str(instance_info["id"]),
+ stack_slug=instance_info["slug"],
+ grafana_url=instance_info["url"],
+ org_id=str(instance_info["orgId"]),
+ org_slug=instance_info["orgSlug"],
+ org_title=instance_info["orgName"],
+ gcom_token=token_string,
+ gcom_token_org_last_time_synced=timezone.now(),
+ )
+ else:
+ organization.stack_slug = instance_info["slug"]
+ organization.org_slug = instance_info["orgSlug"]
+ organization.org_title = instance_info["orgName"]
+ organization.grafana_url = instance_info["url"]
+ organization.gcom_token = token_string
+ organization.gcom_token_org_last_time_synced = timezone.now()
+ organization.save(
+ update_fields=[
+ "stack_slug",
+ "org_slug",
+ "org_title",
+ "grafana_url",
+ "gcom_token",
+ "gcom_token_org_last_time_synced",
+ ]
+ )
+ logger.debug(f"Finish authenticate by making request to gcom api for org={org_id}, stack_id={stack_id}")
+ return GcomToken(organization)
+
+
+def check_token(token_string: str, context: dict):
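+ # token format: "gcom:<token>" is validated against Grafana.com; anything
+ # else is treated as a locally issued PluginAuthToken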
+ token_parts = token_string.split(":")
+ if len(token_parts) > 1 and token_parts[0] == "gcom":
+ return check_gcom_permission(token_parts[1], context)
+ else:
+ return PluginAuthToken.validate_token_string(token_string, context=context)
+
+
+def get_active_instance_ids() -> Tuple[Optional[set], bool]:
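+ # returns (active_instance_ids, is_cloud_configured); the flag lets callers
+ # distinguish "cloud not configured" from "configured but nothing returned"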
+ if not settings.GRAFANA_COM_API_TOKEN or settings.LICENSE != settings.CLOUD_LICENSE_NAME:
+ return None, False
+
+ client = GcomAPIClient(settings.GRAFANA_COM_API_TOKEN)
+ active_instances, status = client.get_active_instances()
+
+ if not active_instances:
+ return None, True
+
+ active_ids = set(i["id"] for i in active_instances["items"])
+ return active_ids, True
diff --git a/engine/apps/grafana_plugin/permissions.py b/engine/apps/grafana_plugin/permissions.py
new file mode 100644
index 0000000000..b4ea8b5e93
--- /dev/null
+++ b/engine/apps/grafana_plugin/permissions.py
@@ -0,0 +1,50 @@
+import json
+import logging
+
+from django.apps import apps
+from django.views import View
+from rest_framework import permissions
+from rest_framework.authentication import get_authorization_header
+from rest_framework.request import Request
+
+from apps.auth_token.exceptions import InvalidToken
+from apps.grafana_plugin.helpers.gcom import check_token
+
+logger = logging.getLogger(__name__)
+
+
+class PluginTokenVerified(permissions.BasePermission):
+
+ # The Grafana plugin authenticates either with a token from gcom or with one
+ # generated internally by OnCall. Tokens from gcom are prefixed with "gcom:";
+ # anything else is treated as a local token.
+ def has_permission(self, request: Request, view: View) -> bool:
+ token_string = get_authorization_header(request).decode()
+ context = json.loads(request.headers.get("X-Instance-Context"))
+ try:
+ auth_token = check_token(token_string, context)
+ if auth_token:
+ return True
+ except InvalidToken:
+ logger.warning(f"Invalid token used: {context}")
+
+ return False
+
+
+class SelfHostedInvitationTokenVerified(permissions.BasePermission):
+ def has_permission(self, request: Request, view: View) -> bool:
+ DynamicSetting = apps.get_model("base", "DynamicSetting")
+ self_hosted_settings = DynamicSetting.objects.get_or_create(
+ name="self_hosted_invitations",
+ defaults={
+ "json_value": {
+ "keys": [],
+ }
+ },
+ )[0]
+ token_string = get_authorization_header(request).decode()
+ try:
+ return token_string in self_hosted_settings.json_value["keys"]
+ except (KeyError, TypeError):
+ # the membership test can only fail on a malformed json_value;
+ # InvalidToken is never raised here
+ logger.warning("Invalid token used")
+
+ return False
diff --git a/engine/apps/grafana_plugin/tasks/__init__.py b/engine/apps/grafana_plugin/tasks/__init__.py
new file mode 100644
index 0000000000..8ba4f62ec7
--- /dev/null
+++ b/engine/apps/grafana_plugin/tasks/__init__.py
@@ -0,0 +1 @@
+from .sync import start_sync_organizations, sync_organization_async # noqa: F401
diff --git a/engine/apps/grafana_plugin/tasks/sync.py b/engine/apps/grafana_plugin/tasks/sync.py
new file mode 100644
index 0000000000..2d6c37bd3a
--- /dev/null
+++ b/engine/apps/grafana_plugin/tasks/sync.py
@@ -0,0 +1,78 @@
+import logging
+
+from celery.utils.log import get_task_logger
+from django.conf import settings
+from django.utils import timezone
+
+from apps.grafana_plugin.helpers import GcomAPIClient
+from apps.grafana_plugin.helpers.gcom import get_active_instance_ids
+from apps.public_api.constants import DEMO_ORGANIZATION_ID
+from apps.user_management.models import Organization
+from apps.user_management.sync import sync_organization
+from common.custom_celery_tasks import shared_dedicated_queue_retry_task
+
+logger = get_task_logger(__name__)
+logger.setLevel(logging.DEBUG)
+
+# celery beat will schedule start_sync_organizations for every 30 minutes
+# to make sure that orgs are synced every 30 minutes, SYNC_PERIOD should be a little lower
+SYNC_PERIOD = timezone.timedelta(minutes=25)
+
+
+@shared_dedicated_queue_retry_task(autoretry_for=(Exception,), retry_backoff=True, max_retries=5)
+def start_sync_organizations():
+ sync_threshold = timezone.now() - SYNC_PERIOD
+
+ organization_qs = Organization.objects.exclude(public_primary_key=DEMO_ORGANIZATION_ID).filter(
+ last_time_synced__lte=sync_threshold
+ )
+
+ active_instance_ids, is_cloud_configured = get_active_instance_ids()
+ if is_cloud_configured:
+ if not active_instance_ids:
+ logger.warning("Did not find any active instances!")
+ return
+ else:
+ logger.debug(f"Found {len(active_instance_ids)} active instances")
+ organization_qs = organization_qs.filter(stack_id__in=active_instance_ids)
+
+ organization_pks = organization_qs.values_list("pk", flat=True)
+
+ max_countdown = int(SYNC_PERIOD.total_seconds())  # keep every task within one sync window
+ for idx, organization_pk in enumerate(organization_pks):
+ # one organization per second, wrapping so every sync starts within SYNC_PERIOD
+ countdown = idx % max_countdown
+ sync_organization_async.apply_async((organization_pk,), countdown=countdown)
+
+
+@shared_dedicated_queue_retry_task(autoretry_for=(Exception,), retry_backoff=True, max_retries=3)
+def sync_organization_async(organization_pk):
+ run_organization_sync(organization_pk, False)
+
+
+@shared_dedicated_queue_retry_task(autoretry_for=(Exception,), max_retries=1)
+def plugin_sync_organization_async(organization_pk):
+ run_organization_sync(organization_pk, True)
+
+
+def run_organization_sync(organization_pk, force_sync):
+ logger.info(f"Start sync Organization {organization_pk}")
+
+ try:
+ organization = Organization.objects.get(pk=organization_pk)
+ except Organization.DoesNotExist:
+ logger.info(f"Organization {organization_pk} was not found")
+ return
+
+ if not force_sync:
+ if organization.last_time_synced and timezone.now() - organization.last_time_synced < SYNC_PERIOD:
+ logger.debug(f"Canceling sync for Organization {organization_pk}, since it was synced recently.")
+ return
+ if settings.GRAFANA_COM_API_TOKEN and settings.LICENSE == settings.CLOUD_LICENSE_NAME:
+ client = GcomAPIClient(settings.GRAFANA_COM_API_TOKEN)
+ instance_info, status = client.get_instance_info(organization.stack_id)
+ if not instance_info or instance_info["status"] != "active":
+ logger.debug(f"Canceling sync for Organization {organization_pk}, as it is no longer active.")
+ return
+
+ sync_organization(organization)
+ logger.info(f"Finish sync Organization {organization_pk}")
diff --git a/engine/apps/grafana_plugin/tests/test_sync.py b/engine/apps/grafana_plugin/tests/test_sync.py
new file mode 100644
index 0000000000..f37b2e419f
--- /dev/null
+++ b/engine/apps/grafana_plugin/tests/test_sync.py
@@ -0,0 +1,100 @@
+from unittest.mock import patch
+
+import pytest
+from django.conf import settings
+from django.test.utils import override_settings
+from django.utils import timezone
+
+from apps.grafana_plugin.tasks.sync import run_organization_sync
+
+
+class SyncOrganization:
+ called = False
+ org = None
+
+ def do_sync_organization(self, org):
+ self.called = True
+ self.org = org
+
+ def reset(self):
+ self.called = False
+ self.org = None
+
+
+class TestGcomAPIClient:
+ called = False
+ info = None
+ status = None
+
+ def reset(self):
+ self.called = False
+ self.info = None
+ self.status = None
+
+ def set_info(self, info):
+ self.info = info
+
+ def set_status(self, status):
+ self.status = status
+
+ def get_instance_info(self, stack_id: str):
+ self.called = True
+ return self.info, self.status
+
+
+@pytest.mark.django_db
+def test_sync_organization_skip(
+ make_organization,
+ make_token_for_organization,
+):
+ organization = make_organization()
+ syncer = SyncOrganization()
+ with patch("apps.grafana_plugin.tasks.sync.sync_organization", new=lambda org: syncer.do_sync_organization(org)):
+ run_organization_sync(organization.id, True) # Call for existing org (forced)
+ assert syncer.called and syncer.org == organization
+ syncer.reset()
+
+ run_organization_sync(123321, True) # Not called for non-existing org
+ assert not syncer.called and not syncer.org
+ syncer.reset()
+
+ run_organization_sync(organization.id, False) # Call for new org
+ assert syncer.called and syncer.org == organization
+ syncer.reset()
+
+ organization.last_time_synced = timezone.now()
+ organization.save(update_fields=["last_time_synced"])
+ run_organization_sync(organization.id, False) # Not called for recently synced org
+ assert not syncer.called and not syncer.org
+ syncer.reset()
+
+
+@override_settings(GRAFANA_COM_API_TOKEN="TestGrafanaComToken")
+@override_settings(LICENSE=settings.CLOUD_LICENSE_NAME)
+@pytest.mark.django_db
+def test_sync_organization_skip_cloud(
+ make_organization,
+ make_token_for_organization,
+):
+ organization = make_organization()
+ syncer = SyncOrganization()
+ test_client = TestGcomAPIClient()
+
+ with patch("apps.grafana_plugin.tasks.sync.sync_organization", new=lambda org: syncer.do_sync_organization(org)):
+ with patch("apps.grafana_plugin.tasks.sync.GcomAPIClient", new=lambda api_token: test_client):
+ test_client.info = {"status": "active"}
+ run_organization_sync(organization.id, False) # Called since instance info is active in cloud
+ assert test_client.called and syncer.called and syncer.org == organization
+ syncer.reset()
+ test_client.reset()
+
+ test_client.info = {"status": "paused"}
+ run_organization_sync(organization.id, False) # Not called since status != active in cloud
+ assert test_client.called and not syncer.called and not syncer.org
+ syncer.reset()
+ test_client.reset()
+
+ run_organization_sync(organization.id, False) # Not called since status was none in cloud
+ assert test_client.called and not syncer.called and not syncer.org
+ syncer.reset()
+ test_client.reset()
diff --git a/engine/apps/grafana_plugin/urls.py b/engine/apps/grafana_plugin/urls.py
new file mode 100644
index 0000000000..347f8e9d7d
--- /dev/null
+++ b/engine/apps/grafana_plugin/urls.py
@@ -0,0 +1,19 @@
+from django.urls import re_path
+
+from apps.grafana_plugin.views import (
+ InstallView,
+ PluginSyncView,
+ SelfHostedInstallView,
+ StatusView,
+ SyncOrganizationView,
+)
+
+app_name = "grafana-plugin"
+
+urlpatterns = [
+ re_path(r"self-hosted/install/?", SelfHostedInstallView().as_view()),
+ re_path(r"status/?", StatusView().as_view()),
+ re_path(r"install/?", InstallView().as_view()),
+ re_path(r"sync_organization/?", SyncOrganizationView().as_view()),
+ re_path(r"sync/?", PluginSyncView().as_view()),
+]
diff --git a/engine/apps/grafana_plugin/views/__init__.py b/engine/apps/grafana_plugin/views/__init__.py
new file mode 100644
index 0000000000..3b3f1a3ac4
--- /dev/null
+++ b/engine/apps/grafana_plugin/views/__init__.py
@@ -0,0 +1,5 @@
+from .install import InstallView # noqa: F401
+from .self_hosted_install import SelfHostedInstallView # noqa: F401
+from .status import StatusView # noqa: F401
+from .sync import PluginSyncView # noqa: F401
+from .sync_organization import SyncOrganizationView # noqa: F401
diff --git a/engine/apps/grafana_plugin/views/install.py b/engine/apps/grafana_plugin/views/install.py
new file mode 100644
index 0000000000..b8b883fe7a
--- /dev/null
+++ b/engine/apps/grafana_plugin/views/install.py
@@ -0,0 +1,24 @@
+from rest_framework import status
+from rest_framework.request import Request
+from rest_framework.response import Response
+from rest_framework.views import APIView
+
+from apps.grafana_plugin.permissions import PluginTokenVerified
+from apps.user_management.models import Organization
+from apps.user_management.sync import sync_organization
+from common.api_helpers.mixins import GrafanaHeadersMixin
+
+
+class InstallView(GrafanaHeadersMixin, APIView):
+ permission_classes = (PluginTokenVerified,)
+
+ def post(self, request: Request) -> Response:
+ stack_id = self.instance_context["stack_id"]
+ org_id = self.instance_context["org_id"]
+
+ organization = Organization.objects.filter(stack_id=stack_id, org_id=org_id).first()
+ organization.api_token = self.instance_context["grafana_token"]
+ organization.save(update_fields=["api_token"])
+
+ sync_organization(organization)
+ return Response(status=status.HTTP_204_NO_CONTENT)
diff --git a/engine/apps/grafana_plugin/views/self_hosted_install.py b/engine/apps/grafana_plugin/views/self_hosted_install.py
new file mode 100644
index 0000000000..16dbd7bc9a
--- /dev/null
+++ b/engine/apps/grafana_plugin/views/self_hosted_install.py
@@ -0,0 +1,55 @@
+from django.apps import apps
+from django.conf import settings
+from rest_framework import status
+from rest_framework.authentication import get_authorization_header
+from rest_framework.request import Request
+from rest_framework.response import Response
+from rest_framework.views import APIView
+
+from apps.grafana_plugin.permissions import SelfHostedInvitationTokenVerified
+from apps.user_management.models import Organization
+from apps.user_management.sync import sync_organization
+from common.api_helpers.mixins import GrafanaHeadersMixin
+
+
+class SelfHostedInstallView(GrafanaHeadersMixin, APIView):
+ permission_classes = (SelfHostedInvitationTokenVerified,)
+
+ def remove_invitation_token(self, token):
+ DynamicSetting = apps.get_model("base", "DynamicSetting")
+ self_hosted_settings = DynamicSetting.objects.get_or_create(
+ name="self_hosted_invitations",
+ defaults={
+ "json_value": {
+ "keys": [],
+ }
+ },
+ )[0]
+ self_hosted_settings.json_value["keys"].remove(token)
+ self_hosted_settings.save(update_fields=["json_value"])
+
+ def post(self, request: Request) -> Response:
+ token_string = get_authorization_header(request).decode()
+ stack_id = settings.SELF_HOSTED_SETTINGS["STACK_ID"]
+ org_id = settings.SELF_HOSTED_SETTINGS["ORG_ID"]
+
+ organization = Organization.objects.filter(stack_id=stack_id, org_id=org_id).first()
+ if organization:
+ organization.revoke_plugin()
+ organization.grafana_url = self.instance_context["grafana_url"]
+ organization.api_token = self.instance_context["grafana_token"]
+ organization.save(update_fields=["grafana_url", "api_token"])
+ else:
+ organization = Organization.objects.create(
+ stack_id=stack_id,
+ stack_slug=settings.SELF_HOSTED_SETTINGS["STACK_SLUG"],
+ org_id=org_id,
+ org_slug=settings.SELF_HOSTED_SETTINGS["ORG_SLUG"],
+ org_title=settings.SELF_HOSTED_SETTINGS["ORG_TITLE"],
+ grafana_url=self.instance_context["grafana_url"],
+ api_token=self.instance_context["grafana_token"],
+ )
+ sync_organization(organization)
+ provisioning_info = organization.provision_plugin()
+ self.remove_invitation_token(token_string)
+ return Response(data=provisioning_info, status=status.HTTP_201_CREATED)
diff --git a/engine/apps/grafana_plugin/views/status.py b/engine/apps/grafana_plugin/views/status.py
new file mode 100644
index 0000000000..e202e33768
--- /dev/null
+++ b/engine/apps/grafana_plugin/views/status.py
@@ -0,0 +1,52 @@
+from django.apps import apps
+from django.conf import settings
+from rest_framework import status
+from rest_framework.request import Request
+from rest_framework.response import Response
+from rest_framework.views import APIView
+
+from apps.grafana_plugin.helpers import GrafanaAPIClient
+from apps.grafana_plugin.permissions import PluginTokenVerified
+from apps.user_management.models import Organization
+from common.api_helpers.mixins import GrafanaHeadersMixin
+
+
+class StatusView(GrafanaHeadersMixin, APIView):
+ permission_classes = (PluginTokenVerified,)
+
+ def get(self, request: Request) -> Response:
+ stack_id = self.instance_context["stack_id"]
+ org_id = self.instance_context["org_id"]
+ is_installed = False
+ connected_to_grafana = False
+ token_ok = False
+ allow_signup = True
+ organization = Organization.objects.filter(stack_id=stack_id, org_id=org_id).first()
+ if organization:
+ is_installed = True
+ client = GrafanaAPIClient(api_url=organization.grafana_url, api_token=organization.api_token)
+ token_info, client_status = client.check_token()
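+ # a 401/403 still proves Grafana is reachable; it only means the stored
+ # token is bad, so it counts as connected while token_ok stays False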
+ connected_to_grafana = (
+ client_status["connected"]
+ or client_status["status_code"] == status.HTTP_401_UNAUTHORIZED
+ or client_status["status_code"] == status.HTTP_403_FORBIDDEN
+ )
+ if token_info:
+ token_ok = True
+ else:
+ DynamicSetting = apps.get_model("base", "DynamicSetting")
+ allow_signup = DynamicSetting.objects.get_or_create(
+ name="allow_plugin_organization_signup", defaults={"boolean_value": True}
+ )[0].boolean_value
+
+ return Response(
+ data={
+ "is_installed": is_installed,
+ "grafana_connection_ok": connected_to_grafana,
+ "token_ok": token_ok,
+ "allow_signup": allow_signup,
+ "is_user_anonymous": self.grafana_context["IsAnonymous"],
+ "license": settings.LICENSE,
+ "version": settings.VERSION,
+ }
+ )
diff --git a/engine/apps/grafana_plugin/views/sync.py b/engine/apps/grafana_plugin/views/sync.py
new file mode 100644
index 0000000000..a0deeec3c0
--- /dev/null
+++ b/engine/apps/grafana_plugin/views/sync.py
@@ -0,0 +1,73 @@
+import logging
+
+from django.apps import apps
+from django.conf import settings
+from rest_framework import status
+from rest_framework.request import Request
+from rest_framework.response import Response
+from rest_framework.views import APIView
+
+from apps.grafana_plugin.permissions import PluginTokenVerified
+from apps.grafana_plugin.tasks.sync import plugin_sync_organization_async
+from apps.user_management.models import Organization
+from common.api_helpers.mixins import GrafanaHeadersMixin
+
+logger = logging.getLogger(__name__)
+
+
+class PluginSyncView(GrafanaHeadersMixin, APIView):
+ permission_classes = (PluginTokenVerified,)
+
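+ # POST kicks off an async sync and flips the org's api_token_status to
+ # PENDING; GET then polls that status: 202 while PENDING, otherwise 200
+ # with token_ok reflecting the final state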
+ def post(self, request: Request) -> Response:
+ stack_id = self.instance_context["stack_id"]
+ org_id = self.instance_context["org_id"]
+
+ is_installed = False
+ # initialize so the allow_signup check below cannot raise NameError when the
+ # organization lookup fails with DoesNotExist
+ organization = None
+ try:
+ organization = Organization.objects.get(stack_id=stack_id, org_id=org_id)
+ if organization.api_token_status == Organization.API_TOKEN_STATUS_OK:
+ is_installed = True
+ organization.api_token_status = Organization.API_TOKEN_STATUS_PENDING
+ organization.save(update_fields=["api_token_status"])
+ plugin_sync_organization_async.apply_async((organization.pk,))
+ except Organization.DoesNotExist:
+ logger.info(f"Organization for stack {stack_id} org {org_id} was not found")
+
+ allow_signup = True
+ if not organization:
+ DynamicSetting = apps.get_model("base", "DynamicSetting")
+ allow_signup = DynamicSetting.objects.get_or_create(
+ name="allow_plugin_organization_signup", defaults={"boolean_value": True}
+ )[0].boolean_value
+
+ return Response(
+ status=status.HTTP_202_ACCEPTED,
+ data={
+ "is_installed": is_installed,
+ "is_user_anonymous": self.grafana_context["IsAnonymous"],
+ "allow_signup": allow_signup,
+ },
+ )
+
+ def get(self, request: Request) -> Response:
+ stack_id = self.instance_context["stack_id"]
+ org_id = self.instance_context["org_id"]
+
+ token_ok = False
+ try:
+ organization = Organization.objects.get(stack_id=stack_id, org_id=org_id)
+ if organization.api_token_status == Organization.API_TOKEN_STATUS_PENDING:
+ return Response(status=status.HTTP_202_ACCEPTED)
+ elif organization.api_token_status == Organization.API_TOKEN_STATUS_OK:
+ token_ok = True
+ except Organization.DoesNotExist:
+ logger.info(f"Organization for stack {stack_id} org {org_id} was not found")
+
+ return Response(
+ status=status.HTTP_200_OK,
+ data={
+ "token_ok": token_ok,
+ "license": settings.LICENSE,
+ "version": settings.VERSION,
+ },
+ )
diff --git a/engine/apps/grafana_plugin/views/sync_organization.py b/engine/apps/grafana_plugin/views/sync_organization.py
new file mode 100644
index 0000000000..faf3b5cac2
--- /dev/null
+++ b/engine/apps/grafana_plugin/views/sync_organization.py
@@ -0,0 +1,25 @@
+from contextlib import suppress
+
+from rest_framework import status
+from rest_framework.request import Request
+from rest_framework.response import Response
+from rest_framework.views import APIView
+
+from apps.grafana_plugin.permissions import PluginTokenVerified
+from apps.user_management.models import Organization
+from apps.user_management.sync import sync_organization
+from common.api_helpers.mixins import GrafanaHeadersMixin
+
+
+class SyncOrganizationView(GrafanaHeadersMixin, APIView):
+ permission_classes = (PluginTokenVerified,)
+
+ def post(self, request: Request) -> Response:
+ stack_id = self.instance_context["stack_id"]
+ org_id = self.instance_context["org_id"]
+
+ with suppress(Organization.DoesNotExist):
+ organization = Organization.objects.get(stack_id=stack_id, org_id=org_id)
+ sync_organization(organization)
+
+ return Response(status=status.HTTP_200_OK)
diff --git a/engine/apps/grafana_plugin_management/__init__.py b/engine/apps/grafana_plugin_management/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/engine/apps/grafana_plugin_management/urls.py b/engine/apps/grafana_plugin_management/urls.py
new file mode 100644
index 0000000000..91ea363bf2
--- /dev/null
+++ b/engine/apps/grafana_plugin_management/urls.py
@@ -0,0 +1,13 @@
+from django.urls import include, path
+
+from apps.grafana_plugin_management.views import PluginInstallationsView
+from common.api_helpers.optional_slash_router import OptionalSlashRouter
+
+app_name = "grafana-plugin-management"
+
+router = OptionalSlashRouter()
+router.register(r"plugin_installations", PluginInstallationsView, basename="plugin_installations")
+
+urlpatterns = [
+ path("", include(router.urls)),
+]
diff --git a/engine/apps/grafana_plugin_management/views/__init__.py b/engine/apps/grafana_plugin_management/views/__init__.py
new file mode 100644
index 0000000000..ae0b494091
--- /dev/null
+++ b/engine/apps/grafana_plugin_management/views/__init__.py
@@ -0,0 +1 @@
+from .plugin_installations import PluginInstallationsView # noqa: F401
diff --git a/engine/apps/grafana_plugin_management/views/plugin_installations.py b/engine/apps/grafana_plugin_management/views/plugin_installations.py
new file mode 100644
index 0000000000..3268aa88a6
--- /dev/null
+++ b/engine/apps/grafana_plugin_management/views/plugin_installations.py
@@ -0,0 +1,64 @@
+from rest_framework import status
+from rest_framework.authentication import BasicAuthentication, SessionAuthentication
+from rest_framework.decorators import action
+from rest_framework.mixins import CreateModelMixin, ListModelMixin, RetrieveModelMixin
+from rest_framework.response import Response
+from rest_framework.viewsets import GenericViewSet
+
+from apps.api.permissions import IsStaff
+from apps.api.serializers.organization import PluginOrganizationSerializer
+from apps.grafana_plugin.helpers.client import GrafanaAPIClient
+from apps.user_management.models import Organization
+from apps.user_management.sync import sync_organization
+from common.api_helpers.mixins import PublicPrimaryKeyMixin
+
+
+class PluginInstallationsView(
+ PublicPrimaryKeyMixin,
+ CreateModelMixin,
+ RetrieveModelMixin,
+ ListModelMixin,
+ GenericViewSet,
+):
+ authentication_classes = [BasicAuthentication, SessionAuthentication]
+ permission_classes = (IsStaff,)
+
+ model = Organization
+ serializer_class = PluginOrganizationSerializer
+
+ def get_queryset(self):
+ return Organization.objects.all()
+
+ def create(self, request, *args, **kwargs):
+ serializer = self.get_serializer(data=request.data)
+ serializer.is_valid(raise_exception=True)
+ organization = serializer.save()
+ sync_organization(organization)
+ return Response(data=organization.provision_plugin(), status=status.HTTP_201_CREATED)
+
+ @action(methods=["post"], detail=True)
+ def revoke_and_reissue(self, request, pk):
+ organization = self.get_object()
+ serializer = self.get_serializer(organization, data=request.data)
+ serializer.is_valid(raise_exception=True)
+ serializer.save()
+ return Response(data=organization.provision_plugin())
+
+ @action(methods=["post"], detail=True)
+ def revoke(self, request, pk):
+ organization = self.get_object()
+ organization.revoke_plugin()
+ return Response(data={"details": "Plugin token revoked"})
+
+ @action(methods=["get"], detail=True)
+ def status(self, request, pk):
+ organization = self.get_object()
+ client = GrafanaAPIClient(api_url=organization.grafana_url, api_token=organization.api_token)
+ _, grafana_status = client.check_token()
+ return Response(data=grafana_status)
+
+ @action(methods=["post"], detail=True)
+ def sync_organization(self, request, pk):
+ organization = self.get_object()
+ sync_organization(organization)
+ return Response(data={"details": "Sync organization complete"})
diff --git a/engine/apps/heartbeat/__init__.py b/engine/apps/heartbeat/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/engine/apps/heartbeat/admin.py b/engine/apps/heartbeat/admin.py
new file mode 100644
index 0000000000..601aace698
--- /dev/null
+++ b/engine/apps/heartbeat/admin.py
@@ -0,0 +1,5 @@
+from django.contrib import admin
+
+from .models import HeartBeat
+
+admin.site.register(HeartBeat)
diff --git a/engine/apps/heartbeat/migrations/0001_squashed_initial.py b/engine/apps/heartbeat/migrations/0001_squashed_initial.py
new file mode 100644
index 0000000000..a30b29e7a5
--- /dev/null
+++ b/engine/apps/heartbeat/migrations/0001_squashed_initial.py
@@ -0,0 +1,55 @@
+# Generated by Django 3.2.5 on 2022-05-31 14:46
+
+import apps.heartbeat.models
+import django.core.validators
+from django.db import migrations, models
+import django.db.models.deletion
+
+
+class Migration(migrations.Migration):
+
+ initial = True
+
+ dependencies = [
+ ('alerts', '0001_squashed_initial'),
+ ]
+
+ operations = [
+ migrations.CreateModel(
+ name='IntegrationHeartBeat',
+ fields=[
+ ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
+ ('created_at', models.DateTimeField(auto_now_add=True)),
+ ('timeout_seconds', models.IntegerField(default=0)),
+ ('last_heartbeat_time', models.DateTimeField(default=None, null=True)),
+ ('last_checkup_task_time', models.DateTimeField(default=None, null=True)),
+ ('actual_check_up_task_id', models.CharField(max_length=100)),
+ ('previous_alerted_state_was_life', models.BooleanField(default=True)),
+ ('public_primary_key', models.CharField(default=apps.heartbeat.models.generate_public_primary_key_for_integration_heart_beat, max_length=20, unique=True, validators=[django.core.validators.MinLengthValidator(13)])),
+ ('alert_receive_channel', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='integration_heartbeat', to='alerts.alertreceivechannel')),
+ ],
+ options={
+ 'abstract': False,
+ },
+ ),
+ migrations.CreateModel(
+ name='HeartBeat',
+ fields=[
+ ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
+ ('created_at', models.DateTimeField(auto_now_add=True)),
+ ('timeout_seconds', models.IntegerField(default=0)),
+ ('last_heartbeat_time', models.DateTimeField(default=None, null=True)),
+ ('last_checkup_task_time', models.DateTimeField(default=None, null=True)),
+ ('actual_check_up_task_id', models.CharField(max_length=100)),
+ ('previous_alerted_state_was_life', models.BooleanField(default=True)),
+ ('message', models.TextField(default='')),
+ ('title', models.TextField(default='HeartBeat Title')),
+ ('link', models.URLField(default=None, max_length=500, null=True)),
+ ('user_defined_id', models.CharField(default='default', max_length=100)),
+ ('alert_receive_channel', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='heartbeats', to='alerts.alertreceivechannel')),
+ ],
+ options={
+ 'unique_together': {('alert_receive_channel', 'user_defined_id')},
+ },
+ ),
+ ]
diff --git a/engine/apps/heartbeat/migrations/__init__.py b/engine/apps/heartbeat/migrations/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/engine/apps/heartbeat/models.py b/engine/apps/heartbeat/models.py
new file mode 100644
index 0000000000..034c3f1dfd
--- /dev/null
+++ b/engine/apps/heartbeat/models.py
@@ -0,0 +1,244 @@
+import logging
+from urllib.parse import urljoin
+
+import humanize
+from django.conf import settings
+from django.core.validators import MinLengthValidator
+from django.db import models, transaction
+from django.utils import timezone
+
+from apps.integrations.tasks import create_alert
+from common.public_primary_keys import generate_public_primary_key, increase_public_primary_key_length
+
+logger = logging.getLogger(__name__)
+
+
+def generate_public_primary_key_for_integration_heart_beat():
+ prefix = "B"
+ new_public_primary_key = generate_public_primary_key(prefix)
+
+ failure_counter = 0
+ while IntegrationHeartBeat.objects.filter(public_primary_key=new_public_primary_key).exists():
+ new_public_primary_key = increase_public_primary_key_length(
+ failure_counter=failure_counter, prefix=prefix, model_name="IntegrationHeartBeat"
+ )
+ failure_counter += 1
+
+ return new_public_primary_key
+
+
+class BaseHeartBeat(models.Model):
+ """
+ Implements base heartbeat logic
+ """
+
+ class Meta:
+ abstract = True
+
+ created_at = models.DateTimeField(auto_now_add=True)
+ timeout_seconds = models.IntegerField(default=0)
+ last_heartbeat_time = models.DateTimeField(default=None, null=True)
+ last_checkup_task_time = models.DateTimeField(default=None, null=True)
+ actual_check_up_task_id = models.CharField(max_length=100)
+ previous_alerted_state_was_life = models.BooleanField(default=True)
+
+ @classmethod
+ def perform_heartbeat_check(cls, heartbeat_id, task_request_id):
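+ # lock the heartbeat row and only act if this task is still the most recently
+ # scheduled check; stale duplicate checkup tasks are ignored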
+ with transaction.atomic():
+ heartbeats = cls.objects.filter(pk=heartbeat_id).select_for_update()
+ if len(heartbeats) == 0:
+ logger.info(f"Heartbeat {heartbeat_id} not found {task_request_id}")
+ return
+ heartbeat = heartbeats[0]
+ if task_request_id == heartbeat.actual_check_up_task_id:
+ heartbeat.check_heartbeat_state_and_save()
+ else:
+ logger.info(f"Heartbeat {heartbeat_id} is not actual {task_request_id}")
+
+ def check_heartbeat_state_and_save(self):
+ """
+ Check the heartbeat state and persist the alerted-state flag if it changed.
+ """
+ state_changed = self.check_heartbeat_state()
+ if state_changed:
+ self.save(update_fields=["previous_alerted_state_was_life"])
+ return state_changed
+
+ def check_heartbeat_state(self):
+ """
+ Check the heartbeat state without saving.
+ Use this method if you need to modify the heartbeat instance yourself while
+ checking its status (see IntegrationHeartBeatAPIView.post() for an example).
+ """
+ state_changed = False
+ if self.is_expired:
+ if self.previous_alerted_state_was_life:
+ self.on_heartbeat_expired()
+ self.previous_alerted_state_was_life = False
+ state_changed = True
+ else:
+ if not self.previous_alerted_state_was_life:
+ self.on_heartbeat_restored()
+ self.previous_alerted_state_was_life = True
+ state_changed = True
+ return state_changed
+
+ def on_heartbeat_restored(self):
+ raise NotImplementedError
+
+ def on_heartbeat_expired(self):
+ raise NotImplementedError
+
+ @property
+ def is_expired(self):
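+ # assumes last_heartbeat_time is set; IntegrationHeartBeat overrides this
+ # property to handle the case where no signal was ever received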
+ return self.last_heartbeat_time + timezone.timedelta(seconds=self.timeout_seconds) < timezone.now()
+
+ @property
+ def expiration_time(self):
+ return self.last_heartbeat_time + timezone.timedelta(seconds=self.timeout_seconds)
+
+
+class HeartBeat(BaseHeartBeat):
+ """
+ HeartBeat Integration itself
+ """
+
+ alert_receive_channel = models.ForeignKey(
+ "alerts.AlertReceiveChannel", on_delete=models.CASCADE, related_name="heartbeats"
+ )
+
+ message = models.TextField(default="")
+ title = models.TextField(default="HeartBeat Title")
+ link = models.URLField(max_length=500, default=None, null=True)
+ user_defined_id = models.CharField(default="default", max_length=100)
+
+ def on_heartbeat_restored(self):
+ create_alert.apply_async(
+ kwargs={
+ "title": "[OK] " + self.title,
+ "message": self.title,
+ "image_url": None,
+ "link_to_upstream_details": self.link,
+ "alert_receive_channel_pk": self.alert_receive_channel.pk,
+ "integration_unique_data": {},
+ "raw_request_data": {
+ "is_resolve": True,
+ "id": self.pk,
+ "user_defined_id": self.user_defined_id,
+ },
+ },
+ )
+
+ def on_heartbeat_expired(self):
+ create_alert.apply_async(
+ kwargs={
+ "title": "[EXPIRED] " + self.title,
+ "message": self.message
+ + "\nCreated: {}\nExpires: {}\nLast HeartBeat: {}".format(
+ self.created_at,
+ self.expiration_time,
+ self.last_checkup_task_time,
+ ),
+ "image_url": None,
+ "link_to_upstream_details": self.link,
+ "alert_receive_channel_pk": self.alert_receive_channel.pk,
+ "integration_unique_data": {},
+ "raw_request_data": {
+ "is_resolve": False,
+ "id": self.pk,
+ "user_defined_id": self.user_defined_id,
+ },
+ }
+ )
+
+ class Meta:
+ unique_together = (("alert_receive_channel", "user_defined_id"),)
+
+
+class IntegrationHeartBeat(BaseHeartBeat):
+ """
+ HeartBeat for Integration (FormattedWebhook, Grafana, etc.)
+ """
+
+ public_primary_key = models.CharField(
+ max_length=20,
+ validators=[MinLengthValidator(settings.PUBLIC_PRIMARY_KEY_MIN_LENGTH + 1)],
+ unique=True,
+ default=generate_public_primary_key_for_integration_heart_beat,
+ )
+
+ alert_receive_channel = models.OneToOneField(
+ "alerts.AlertReceiveChannel", on_delete=models.CASCADE, related_name="integration_heartbeat"
+ )
+
+ @property
+ def repr_settings_for_client_side_logging(self):
+ """
+ Example output:
+ timeout: 30 minutes
+ """
+ return f"timeout: {humanize.naturaldelta(self.timeout_seconds)}"
+
+ @property
+ def is_expired(self):
+ if self.last_heartbeat_time is not None:
+ # a heartbeat signal has been received at least once: check the timeout
+ return self.last_heartbeat_time + timezone.timedelta(seconds=self.timeout_seconds) < timezone.now()
+ else:
+ # no heartbeat signal has ever been received, so the heartbeat cannot expire yet
+ return False
+
+ @property
+ def status(self):
+ """
+ Return a bool indicating the heartbeat status:
+ True if the first heartbeat signal was received and the flow is OK, else False.
+ If the first signal was never received, the configuration is unfinished and the status is not OK.
+ """
+ if self.last_heartbeat_time is not None:
+ return not self.is_expired
+ else:
+ return False
+
+ @property
+ def link(self):
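+ # e.g. "<integration_url>heartbeat/"; the exact URL shape depends on how
+ # integration_url is built for the alert receive channel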
+ return urljoin(self.alert_receive_channel.integration_url, "heartbeat/")
+
+ def on_heartbeat_restored(self):
+ create_alert.apply_async(
+ kwargs={
+ "title": self.alert_receive_channel.heartbeat_restored_title,
+ "message": self.alert_receive_channel.heartbeat_restored_message,
+ "image_url": None,
+ "link_to_upstream_details": None,
+ "alert_receive_channel_pk": self.alert_receive_channel.pk,
+ "integration_unique_data": {},
+ "raw_request_data": self.alert_receive_channel.heartbeat_restored_payload,
+ },
+ )
+
+ def on_heartbeat_expired(self):
+ create_alert.apply_async(
+ kwargs={
+ "title": self.alert_receive_channel.heartbeat_expired_title,
+ "message": self.alert_receive_channel.heartbeat_expired_message,
+ "image_url": None,
+ "link_to_upstream_details": None,
+ "alert_receive_channel_pk": self.alert_receive_channel.pk,
+ "integration_unique_data": {},
+ "raw_request_data": self.alert_receive_channel.heartbeat_expired_payload,
+ },
+ )
+
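+    # Preset expiration windows offered to users; the selected value is stored in timeout_seconds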
+ TIMEOUT_CHOICES = (
+ (60, "1 minute"),
+ (120, "2 minutes"),
+ (180, "3 minutes"),
+ (300, "5 minutes"),
+ (600, "10 minutes"),
+ (900, "15 minutes"),
+ (1800, "30 minutes"),
+ (3600, "1 hour"),
+ (43200, "12 hours"),
+ (86400, "1 day"),
+ )
diff --git a/engine/apps/heartbeat/tasks.py b/engine/apps/heartbeat/tasks.py
new file mode 100644
index 0000000000..55b92fce1c
--- /dev/null
+++ b/engine/apps/heartbeat/tasks.py
@@ -0,0 +1,80 @@
+from time import perf_counter
+
+from celery.utils.log import get_task_logger
+from django.apps import apps
+from django.db import transaction
+from django.utils import timezone
+
+from common.custom_celery_tasks import shared_dedicated_queue_retry_task
+
+logger = get_task_logger(__name__)
+
+
+@shared_dedicated_queue_retry_task(bind=True)
+def heartbeat_checkup(self, heartbeat_id):
+ HeartBeat = apps.get_model("heartbeat", "HeartBeat")
+    HeartBeat.perform_heartbeat_check(heartbeat_id, self.request.id)
+
+
+@shared_dedicated_queue_retry_task()
+def integration_heartbeat_checkup(heartbeat_id):
+ IntegrationHeartBeat = apps.get_model("heartbeat", "IntegrationHeartBeat")
+ IntegrationHeartBeat.perform_heartbeat_check(heartbeat_id, integration_heartbeat_checkup.request.id)
+
+
+@shared_dedicated_queue_retry_task()
+def restore_heartbeat_tasks():
+ """
+ Restore heartbeat tasks in case they got lost for some reason
+ """
+ HeartBeat = apps.get_model("heartbeat", "HeartBeat")
+ for heartbeat in HeartBeat.objects.all():
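+        # A checkup task should have run within timeout_seconds of the last one;
+        # the extra 5 minutes is a grace period before assuming the task was lost
+        # and re-scheduling it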
+ if (
+ heartbeat.last_checkup_task_time
+ + timezone.timedelta(minutes=5)
+ + timezone.timedelta(seconds=heartbeat.timeout_seconds)
+ < timezone.now()
+ ):
+ task = heartbeat_checkup.apply_async((heartbeat.pk,), countdown=5)
+ heartbeat.actual_check_up_task_id = task.id
+ heartbeat.save()
+
+
+@shared_dedicated_queue_retry_task()
+def process_heartbeat_task(alert_receive_channel_pk):
+ start = perf_counter()
+ IntegrationHeartBeat = apps.get_model("heartbeat", "IntegrationHeartBeat")
+ with transaction.atomic():
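+        # Lock the heartbeat row so concurrent pings can't schedule duplicate
+        # checkup tasks or overwrite each other's state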
+ heartbeats = IntegrationHeartBeat.objects.filter(
+ alert_receive_channel__pk=alert_receive_channel_pk,
+ ).select_for_update()
+ if len(heartbeats) == 0:
+ logger.info(f"Integration Heartbeat for alert_receive_channel {alert_receive_channel_pk} was not found.")
+ return
+ else:
+ heartbeat = heartbeats[0]
+ heartbeat_selected = perf_counter()
+ logger.info(
+ f"IntegrationHeartBeat selected for alert_receive_channel {alert_receive_channel_pk} in {heartbeat_selected - start}"
+ )
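+        # Schedule the next checkup to fire just after the timeout window elapses;
+        # the task id is stored below so the latest checkup can be told apart from stale ones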
+ task = integration_heartbeat_checkup.apply_async(
+ (heartbeat.pk,),
+ countdown=heartbeat.timeout_seconds + 1,
+ )
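+        # The very first ping only arms the heartbeat; expired/restored state is
+        # evaluated starting from the second ping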
+ is_touched = heartbeat.last_heartbeat_time is not None
+ heartbeat.actual_check_up_task_id = task.id
+ heartbeat.last_heartbeat_time = timezone.now()
+ update_fields = ["actual_check_up_task_id", "last_heartbeat_time"]
+ task_started = perf_counter()
+ logger.info(
+ f"heartbeat_checkup task started for alert_receive_channel {alert_receive_channel_pk} in {task_started - start}"
+ )
+ if is_touched:
+ state_changed = heartbeat.check_heartbeat_state()
+ state_checked = perf_counter()
+ logger.info(
+ f"state checked for alert_receive_channel {alert_receive_channel_pk} in {state_checked - start}"
+ )
+ if state_changed:
+ update_fields.append("previous_alerted_state_was_life")
+ heartbeat.save(update_fields=update_fields)
diff --git a/engine/apps/heartbeat/tests/__init__.py b/engine/apps/heartbeat/tests/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/engine/apps/heartbeat/tests/factories.py b/engine/apps/heartbeat/tests/factories.py
new file mode 100644
index 0000000000..5e69db9de9
--- /dev/null
+++ b/engine/apps/heartbeat/tests/factories.py
@@ -0,0 +1,10 @@
+import factory
+
+from apps.heartbeat.models import IntegrationHeartBeat
+
+
+class IntegrationHeartBeatFactory(factory.django.DjangoModelFactory):
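+    # Placeholder value; real task ids are assigned when checkup tasks are scheduled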
+ actual_check_up_task_id = "none"
+
+ class Meta:
+ model = IntegrationHeartBeat
diff --git a/engine/apps/heartbeat/tests/test_integration_heartbeat.py b/engine/apps/heartbeat/tests/test_integration_heartbeat.py
new file mode 100644
index 0000000000..917f3f368e
--- /dev/null
+++ b/engine/apps/heartbeat/tests/test_integration_heartbeat.py
@@ -0,0 +1,86 @@
+from unittest.mock import patch
+
+import pytest
+from django.utils import timezone
+
+from apps.alerts.models import AlertReceiveChannel
+
+
+@pytest.mark.django_db
+@patch("apps.heartbeat.models.IntegrationHeartBeat.on_heartbeat_expired", return_value=None)
+@pytest.mark.parametrize("integration", [AlertReceiveChannel.INTEGRATION_FORMATTED_WEBHOOK])
+def test_integration_heartbeat_expired(
+ mocked_handler, make_organization_and_user, make_alert_receive_channel, make_integration_heartbeat, integration
+):
+ amixr_team, _ = make_organization_and_user()
+    # A short timeout and an old last_heartbeat_time make sure the heartbeat is expired
+ timeout = 1
+ last_heartbeat_time = timezone.now() - timezone.timedelta(seconds=timeout * 10)
+ alert_receive_channel = make_alert_receive_channel(amixr_team, integration=integration)
+ integration_heartbeat = make_integration_heartbeat(
+ alert_receive_channel, timeout, last_heartbeat_time=last_heartbeat_time
+ )
+ integration_heartbeat.check_heartbeat_state_and_save()
+ assert mocked_handler.called
+
+
+@pytest.mark.django_db
+@patch("apps.heartbeat.models.IntegrationHeartBeat.on_heartbeat_expired", return_value=None)
+@pytest.mark.parametrize("integration", [AlertReceiveChannel.INTEGRATION_FORMATTED_WEBHOOK])
+def test_integration_heartbeat_already_expired(
+ mocked_handler, make_organization_and_user, make_alert_receive_channel, make_integration_heartbeat, integration
+):
+ amixr_team, _ = make_organization_and_user()
+    # A short timeout and an old last_heartbeat_time make sure the heartbeat is expired
+ timeout = 1
+ last_heartbeat_time = timezone.now() - timezone.timedelta(seconds=timeout * 10)
+ alert_receive_channel = make_alert_receive_channel(amixr_team, integration=integration)
+ integration_heartbeat = make_integration_heartbeat(
+ alert_receive_channel,
+ timeout,
+ last_heartbeat_time=last_heartbeat_time,
+ previous_alerted_state_was_life=False,
+ )
+ integration_heartbeat.check_heartbeat_state_and_save()
+ assert mocked_handler.called is False
+
+
+@pytest.mark.django_db
+@patch("apps.heartbeat.models.IntegrationHeartBeat.on_heartbeat_restored", return_value=None)
+@pytest.mark.parametrize("integration", [AlertReceiveChannel.INTEGRATION_FORMATTED_WEBHOOK])
+def test_integration_heartbeat_restored(
+ mocked_handler, make_organization_and_user, make_alert_receive_channel, make_integration_heartbeat, integration
+):
+ amixr_team, _ = make_organization_and_user()
+    # A long timeout and a fresh last_heartbeat_time make sure the heartbeat is not expired
+ timeout = 1000
+ last_heartbeat_time = timezone.now()
+ alert_receive_channel = make_alert_receive_channel(amixr_team, integration=integration)
+ integration_heartbeat = make_integration_heartbeat(
+ alert_receive_channel,
+ timeout,
+ last_heartbeat_time=last_heartbeat_time,
+ previous_alerted_state_was_life=False,
+ )
+ integration_heartbeat.check_heartbeat_state_and_save()
+ assert mocked_handler.called
+
+
+@pytest.mark.django_db
+@patch("apps.heartbeat.models.IntegrationHeartBeat.on_heartbeat_restored", return_value=None)
+@pytest.mark.parametrize("integration", [AlertReceiveChannel.INTEGRATION_FORMATTED_WEBHOOK])
+def test_integration_heartbeat_restored_and_alert_was_not_sent(
+ mocked_handler, make_organization_and_user, make_alert_receive_channel, make_integration_heartbeat, integration
+):
+ amixr_team, _ = make_organization_and_user()
+    # A long timeout and a fresh last_heartbeat_time make sure the heartbeat is not expired
+ timeout = 1000
+ last_heartbeat_time = timezone.now()
+ alert_receive_channel = make_alert_receive_channel(amixr_team, integration=integration)
+ integration_heartbeat = make_integration_heartbeat(
+ alert_receive_channel,
+ timeout,
+ last_heartbeat_time=last_heartbeat_time,
+ )
+ integration_heartbeat.check_heartbeat_state_and_save()
+ assert mocked_handler.called is False
diff --git a/engine/apps/integrations/__init__.py b/engine/apps/integrations/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/engine/apps/integrations/metadata/__init__.py b/engine/apps/integrations/metadata/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/engine/apps/integrations/metadata/configuration/alertmanager.py b/engine/apps/integrations/metadata/configuration/alertmanager.py
new file mode 100644
index 0000000000..948927a49a
--- /dev/null
+++ b/engine/apps/integrations/metadata/configuration/alertmanager.py
@@ -0,0 +1,254 @@
+# Main
+enabled = True
+title = "AlertManager"
+slug = "alertmanager"
+short_description = "Prometheus"
+is_displayed_on_web = True
+is_featured = False
+is_able_to_autoresolve = True
+is_demo_alert_enabled = True
+
+description = """
+Alerts from Grafana Alertmanager are automatically routed to this integration.
+{% for dict_item in grafana_alerting_entities %}
+ Click here
+ to open contact point, and
+ here
+ to open routes for {{dict_item.alertmanager_name}} Alertmanager.
+{% endfor %}
+{% if not is_finished_alerting_setup %}
+ Creating contact points and routes for other alertmanagers...
+{% endif %}"""
+
+# Default templates
+slack_title = """\
+{# Usually title is located in payload.labels.alertname #}
+{% set title = payload.get("labels", {}).get("alertname", "No title (check Web Title Template)") %}
+{# Combine the title from different built-in variables into slack-formatted url #}
+*<{{ grafana_oncall_link }}|#{{ grafana_oncall_incident_id }} {{ title }}>* via {{ integration_name }}
+{% if source_link %}
+ (*<{{ source_link }}|source>*)
+{%- endif %}
+"""
+
+slack_message = """\
+{{- payload.message }}
+{%- if "status" in payload -%}
+*Status*: {{ payload.status }}
+{% endif -%}
+*Labels:* {% for k, v in payload["labels"].items() %}
+{{ k }}: {{ v }}{% endfor %}
+*Annotations:*
+{%- for k, v in payload.get("annotations", {}).items() %}
+{#- render annotation as slack markdown url if it starts with http #}
+{{ k }}: {% if v.startswith("http") %} <{{v}}|here> {% else %} {{v}} {% endif -%}
+{% endfor %}
+""" # noqa: W291
+
+
+slack_image_url = None
+
+web_title = """\
+{# Usually title is located in payload.labels.alertname #}
+{{- payload.get("labels", {}).get("alertname", "No title (check Web Title Template)") }}
+"""
+
+web_message = """\
+{{- payload.message }}
+{%- if "status" in payload %}
+**Status**: {{ payload.status }}
+{% endif -%}
+**Labels:** {% for k, v in payload["labels"].items() %}
+*{{ k }}*: {{ v }}{% endfor %}
+**Annotations:**
+{%- for k, v in payload.get("annotations", {}).items() %}
+{#- render annotation as markdown url if it starts with http #}
+*{{ k }}*: {% if v.startswith("http") %} [here]({{v}}){% else %} {{v}} {% endif -%}
+{% endfor %}
+""" # noqa: W291
+
+
+web_image_url = slack_image_url
+
+sms_title = '{{ payload.get("labels", {}).get("alertname", "Title undefined") }}'
+phone_call_title = sms_title
+
+email_title = web_title
+
+email_message = """\
+{{- payload.message }}
+{%- if "status" in payload -%}
+**Status**: {{ payload.status }}
+{% endif -%}
+**Labels:** {% for k, v in payload["labels"].items() %}
+{{ k }}: {{ v }}{% endfor %}
+**Annotations:**
+{%- for k, v in payload.get("annotations", {}).items() %}
+{#- render annotation as markdown url if it starts with http #}
+{{ k }}: {{v}}
+{% endfor %}
+""" # noqa: W291
+
+telegram_title = sms_title
+
+telegram_message = """\
+{{- payload.message }}
+{%- if "status" in payload -%}
+Status : {{ payload.status }}
+{% endif -%}
+Labels: {% for k, v in payload["labels"].items() %}
+{{ k }}: {{ v }}{% endfor %}
+Annotations:
+{%- for k, v in payload.get("annotations", {}).items() %}
+{#- render annotation as markdown url if it starts with http #}
+{{ k }}: {{ v }}
+{% endfor %}""" # noqa: W291
+
+telegram_image_url = slack_image_url
+
+source_link = "{{ payload.generatorURL }}"
+
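+# Alerts whose rendered grouping_id matches are collapsed into the same alert group;
+# here the full label set is used.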
+grouping_id = "{{ payload.labels }}"
+
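+# Rendered per alert; a truthy result auto-resolves the matching alert group.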
+resolve_condition = """\
+{{ payload.get("status", "") == "resolved" }}
+"""
+
+acknowledge_condition = None
+
+group_verbose_name = "Incident"
+
+tests = {
+ "payload": {
+ "endsAt": "0001-01-01T00:00:00Z",
+ "labels": {
+ "job": "kube-state-metrics",
+ "instance": "10.143.139.7:8443",
+ "job_name": "email-tracking-perform-initialization-1.0.50",
+ "severity": "warning",
+ "alertname": "KubeJobCompletion",
+ "namespace": "default",
+ "prometheus": "monitoring/k8s",
+ },
+ "status": "firing",
+ "startsAt": "2019-12-13T08:57:35.095800493Z",
+ "annotations": {
+ "message": "Job default/email-tracking-perform-initialization-1.0.50 is taking more than one hour to complete.",
+ "runbook_url": "https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubejobcompletion",
+ },
+ "generatorURL": (
+ "https://localhost/prometheus/graph?g0.expr=kube_job_spec_completions%7Bjob%3D%22kube-state-metrics%22%7D"
+ "+-+kube_job_status_succeeded%7Bjob%3D%22kube-state-metrics%22%7D+%3E+0&g0.tab=1"
+ ),
+ },
+ "slack": {
+ "title": (
+ "*<{web_link}|#1 KubeJobCompletion>* via {integration_name} "
+ "(*<"
+ "https://localhost/prometheus/graph?g0.expr=kube_job_spec_completions%7Bjob%3D%22kube-state-metrics%22%7D"
+ "+-+kube_job_status_succeeded%7Bjob%3D%22kube-state-metrics%22%7D+%3E+0&g0.tab=1"
+ "|source>*)"
+ ),
+ "message": (
+ "*Status*: firing\n"
+ "*Labels:* \n"
+ "job: kube-state-metrics\n"
+ "instance: 10.143.139.7:8443\n"
+ "job_name: email-tracking-perform-initialization-1.0.50\n"
+ "severity: warning\n"
+ "alertname: KubeJobCompletion\n"
+ "namespace: default\n"
+ "prometheus: monitoring/k8s\n"
+ "*Annotations:*\n"
+ "message: Job default/email-tracking-perform-initialization-1.0.50 is taking more than one hour to complete. \n"
+ "runbook_url: "
+ ),
+ "image_url": None,
+ },
+ "web": {
+ "title": "KubeJobCompletion",
+ "message": (
+ ""
+ "Status : firing "
+ "Labels: "
+ "job : kube-state-metrics "
+ "instance : 10.143.139.7:8443 "
+ "job_name : email-tracking-perform-initialization-1.0.50 "
+ "severity : warningalertname : KubeJobCompletion "
+ "namespace : defaultprometheus : monitoring/k8s "
+ "Annotations: "
+ "message : Job default/email-tracking-perform-initialization-1.0.50 is taking more than one hour to complete. "
+ "runbook_url : "
+ 'here '
+ "
"
+ ),
+ "image_url": None,
+ },
+ "sms": {
+ "title": "KubeJobCompletion",
+ },
+ "phone_call": {
+ "title": "KubeJobCompletion",
+ },
+ "email": {
+ "title": "KubeJobCompletion",
+ "message": (
+ "**Status**: firing\n"
+ "**Labels:** \n"
+ "job: kube-state-metrics\n"
+ "instance: 10.143.139.7:8443\n"
+ "job_name: email-tracking-perform-initialization-1.0.50\n"
+ "severity: warning\n"
+ "alertname: KubeJobCompletion\n"
+ "namespace: default\n"
+ "prometheus: monitoring/k8s\n"
+ "**Annotations:**\n"
+ "message: Job default/email-tracking-perform-initialization-1.0.50 is taking more than one hour to complete.\n\n"
+ "runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubejobcompletion\n"
+ ),
+ },
+ "telegram": {
+ "title": "KubeJobCompletion",
+ "message": (
+ "Status : firing\n"
+ "Labels:** \n"
+ "job: kube-state-metrics\n"
+ "instance: 10.143.139.7:8443\n"
+ "job_name: email-tracking-perform-initialization-1.0.50\n"
+ "severity: warning\n"
+ "alertname: KubeJobCompletion\n"
+ "namespace: default\n"
+ "prometheus: monitoring/k8s\n"
+ "Annotations: \n"
+ "message: Job default/email-tracking-perform-initialization-1.0.50 is taking more than one hour to complete.\n\n"
+ "runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubejobcompletion\n"
+ ),
+ "image_url": None,
+ },
+}
+
+# Misc
+example_payload = {
+ "receiver": "amixr",
+ "status": "firing",
+ "alerts": [
+ {
+ "status": "firing",
+ "labels": {
+ "alertname": "TestAlert",
+ "region": "eu-1",
+ },
+ "annotations": {"description": "This alert was sent by user for the demonstration purposes"},
+ "startsAt": "2018-12-25T15:47:47.377363608Z",
+ "endsAt": "0001-01-01T00:00:00Z",
+ "generatorURL": "",
+ "amixr_demo": True,
+ }
+ ],
+ "groupLabels": {},
+ "commonLabels": {},
+ "commonAnnotations": {},
+ "externalURL": "http://f1d1ef51d710:9093",
+ "version": "4",
+ "groupKey": "{}:{}",
+}
diff --git a/engine/apps/integrations/metadata/configuration/amazon_sns.py b/engine/apps/integrations/metadata/configuration/amazon_sns.py
new file mode 100644
index 0000000000..954542d010
--- /dev/null
+++ b/engine/apps/integrations/metadata/configuration/amazon_sns.py
@@ -0,0 +1,99 @@
+# Main
+enabled = True
+title = "Amazon SNS"
+slug = "amazon_sns"
+short_description = None
+is_displayed_on_web = True
+description = None
+is_featured = False
+is_able_to_autoresolve = True
+is_demo_alert_enabled = True
+
+# Default templates
+slack_title = """\
+{% if payload|length > 0 -%}
+{% set title = payload.get("AlarmName", "Alert") %}
+{%- else -%}
+{% set title = "Alert" %}
+{%- endif %}
+
+*<{{ grafana_oncall_link }}|#{{ grafana_oncall_incident_id }} {{ title }}>* via {{ integration_name }}
+{% if source_link %}
+ (*<{{ source_link }}|source>*)
+{%- endif %}"""
+
+slack_message = """\
+{% if payload|length == 1 and "message" in payload -%}
+{{ payload.get("message", "Non-JSON payload received. Please make sure you publish monitoring Alarms to SNS, not logs: https://docs.amixr.io/#/integrations/amazon_sns") }}
+{%- else -%}
+*State* {{ payload.get("NewStateValue", "NO") }}
+Region: {{ payload.get("Region", "Undefined") }}
+_Description_: {{ payload.get("AlarmDescription", "Undefined") }}
+{%- endif %}
+"""
+
+slack_image_url = None
+
+web_title = """\
+{% if payload|length > 0 -%}
+{{ payload.get("AlarmName", "Alert")}}
+{%- else -%}
+Alert
+{%- endif %}"""
+
+web_message = """\
+{% if payload|length == 1 and "message" in payload -%}
+{{ payload.get("message", "Non-JSON payload received. Please make sure you publish monitoring Alarms to SNS, not logs: https://docs.amixr.io/#/integrations/amazon_sns") }}
+{%- else -%}
+**State** {{ payload.get("NewStateValue", "NO") }}
+Region: {{ payload.get("Region", "Undefined") }}
+**Description**: {{ payload.get("AlarmDescription", "Undefined") }}
+{%- endif %}
+"""
+
+web_image_url = slack_image_url
+
+sms_title = web_title
+
+phone_call_title = web_title
+
+email_title = web_title
+
+email_message = "{{ payload|tojson_pretty }}"
+
+telegram_title = sms_title
+
+telegram_message = """\
+{% if payload|length == 1 and "message" in payload -%}
+{{ payload.get("message", "Non-JSON payload received. Please make sure you publish monitoring Alarms to SNS, not logs: https://docs.amixr.io/#/integrations/amazon_sns") }}
+{%- else -%}
+State {{ payload.get("NewStateValue", "NO") }}
+Region: {{ payload.get("Region", "Undefined") }}
+Description : {{ payload.get("AlarmDescription", "Undefined") }}
+{%- endif %}
+"""
+
+telegram_image_url = slack_image_url
+
+source_link = """\
+{% if payload|length > 0 -%}
+{% if payload.get("Trigger", {}).get("Namespace") == "AWS/ElasticBeanstalk" -%}
+https://console.aws.amazon.com/elasticbeanstalk/home?region={{ payload.get("TopicArn").split(":")[3] }}
+{%- else -%}
+https://console.aws.amazon.com/cloudwatch/home?region={{ payload.get("TopicArn").split(":")[3] }}
+{%- endif %}
+{%- endif %}"""
+
+grouping_id = web_title
+
+resolve_condition = """\
+{{ payload.get("NewStateValue", "") == "OK" }}
+"""
+
+acknowledge_condition = None
+
+group_verbose_name = web_title
+
+example_payload = {"foo": "bar"}
diff --git a/engine/apps/integrations/metadata/configuration/formatted_webhook.py b/engine/apps/integrations/metadata/configuration/formatted_webhook.py
new file mode 100644
index 0000000000..6f712a23a8
--- /dev/null
+++ b/engine/apps/integrations/metadata/configuration/formatted_webhook.py
@@ -0,0 +1,62 @@
+# Main
+enabled = True
+title = "Formatted Webhook"
+slug = "formatted_webhook"
+short_description = None
+description = None
+is_displayed_on_web = True
+is_featured = False
+is_able_to_autoresolve = True
+is_demo_alert_enabled = True
+
+# Default templates
+slack_title = """\
+*<{{ grafana_oncall_link }}|#{{ grafana_oncall_incident_id }} {{ payload.get("title", "Title undefined (Check Slack Title Template)") }}>* via {{ integration_name }}
+{% if source_link %}
+ (*<{{ source_link }}|source>*)
+{%- endif %}"""
+
+slack_message = "{{ payload.message }}"
+
+slack_image_url = "{{ payload.image_url }}"
+
+web_title = '{{ payload.get("title", "Title undefined (Check Web Title Template)") }}'
+
+web_message = slack_message
+
+web_image_url = slack_image_url
+
+sms_title = web_title
+
+phone_call_title = sms_title
+
+email_title = web_title
+
+email_message = slack_message
+
+telegram_title = sms_title
+
+telegram_message = slack_message
+
+telegram_image_url = slack_image_url
+
+source_link = "{{ payload.link_to_upstream_details }}"
+
+grouping_id = '{{ payload.get("alert_uid", "") }}'
+
+resolve_condition = '{{ payload.get("state", "").upper() == "OK" }}'
+
+acknowledge_condition = None
+
+group_verbose_name = web_title
+
+example_payload = {
+ "alert_uid": "08d6891a-835c-e661-39fa-96b6a9e26552",
+ "title": "TestAlert: The whole system is down",
+ "image_url": "https://upload.wikimedia.org/wikipedia/commons/e/ee/Grumpy_Cat_by_Gage_Skidmore.jpg",
+ "state": "alerting",
+ "link_to_upstream_details": "https://en.wikipedia.org/wiki/Downtime",
+ "message": "This alert was sent by user for the demonstration purposes\nSmth happened. Oh no!",
+}
diff --git a/engine/apps/integrations/metadata/configuration/grafana.py b/engine/apps/integrations/metadata/configuration/grafana.py
new file mode 100644
index 0000000000..383390c444
--- /dev/null
+++ b/engine/apps/integrations/metadata/configuration/grafana.py
@@ -0,0 +1,287 @@
+# Main
+enabled = True
+title = "Grafana"
+slug = "grafana"
+short_description = "Other grafana"
+description = None
+is_displayed_on_web = True
+is_featured = False
+is_able_to_autoresolve = True
+is_demo_alert_enabled = True
+
+# Default templates
+slack_title = """\
+{# Usually title is located in payload.labels.alertname #}
+{% set title = payload.get("title", "") or payload.get("labels", {}).get("alertname", "No title (check Web Title Template)") %}
+{# Combine the title from different built-in variables into slack-formatted url #}
+*<{{ grafana_oncall_link }}|#{{ grafana_oncall_incident_id }} {{ title }}>* via {{ integration_name }}
+{% if source_link %}
+ (*<{{ source_link }}|source>*)
+{%- endif %}
+"""
+
+slack_message = """\
+{{- payload.message }}
+{%- for value in payload.get("evalMatches", []) %}
+*{{ value.metric }}*: {{ value.value }}
+{% endfor -%}
+{%- if "status" in payload -%}
+*Status*: {{ payload.status }}
+{% endif -%}
+{%- if "labels" in payload -%}
+*Labels:* {% for k, v in payload["labels"].items() %}
+{{ k }}: {{ v }}{% endfor %}
+{% endif -%}
+{%- if "annotations" in payload -%}
+*Annotations:*
+{%- for k, v in payload.get("annotations", {}).items() %}
+{#- render annotation as slack markdown url if it starts with http #}
+{{ k }}: {% if v.startswith("http") %} <{{v}}|here> {% else %} {{v}} {% endif -%}
+{% endfor %}
+{%- endif -%}
+"""
+
+slack_image_url = """\
+{{- payload.get("imageUrl") -}}
+"""
+
+web_title = """\
+{# Usually title is located in payload.labels.alertname #}
+{{- payload.get("title", "") or payload.get("labels", {}).get("alertname", "No title (check Web Title Template)") }}
+"""
+
+web_message = """\
+{{- payload.message }}
+{% for value in payload.get("evalMatches", []) -%}
+**{{ value.metric }}**: {{ value.value }}
+{% endfor %}
+{%- if "status" in payload %}
+**Status**: {{ payload.status }}
+{% endif -%}
+{%- if "labels" in payload -%}
+**Labels:** {% for k, v in payload["labels"].items() %}
+*{{ k }}*: {{ v }}{% endfor %}
+{% endif -%}
+{%- if "annotations" in payload -%}
+**Annotations:**
+{%- for k, v in payload.get("annotations", {}).items() %}
+{#- render annotation as markdown url if it starts with http #}
+*{{ k }}*: {% if v.startswith("http") %} [here]({{v}}){% else %} {{v}} {% endif -%}
+{% endfor %}
+{% endif -%}
+"""
+
+web_image_url = slack_image_url
+
+sms_title = """\
+{{ payload.get("ruleName", "") or payload.get("labels", {}).get("alertname", "Title undefined") }}
+"""
+
+phone_call_title = sms_title
+
+email_title = web_title
+
+email_message = """\
+{{- payload.message }}
+{%- for value in payload.get("evalMatches", []) %}
+**{{ value.metric }}**: {{ value.value }}
+{% endfor -%}
+{%- if "status" in payload -%}
+**Status**: {{ payload.status }}
+{% endif -%}
+{%- if "labels" in payload -%}
+**Labels:** {% for k, v in payload["labels"].items() %}
+{{ k }}: {{ v }}{% endfor %}
+{% endif -%}
+{%- if "annotations" in payload -%}
+**Annotations:**
+{%- for k, v in payload.get("annotations", {}).items() %}
+{#- render annotation as markdown url if it starts with http #}
+{{ k }}: {{v}}
+{% endfor %}
+{%- endif -%}
+"""
+
+telegram_title = sms_title
+
+telegram_message = """\
+{{- payload.message }}
+{%- for value in payload.get("evalMatches", []) %}
+{{ value.metric }}: {{ value.value }}
+{% endfor -%}
+{%- if "status" in payload -%}
+Status : {{ payload.status }}
+{% endif -%}
+{%- if "labels" in payload -%}
+Labels: {% for k, v in payload["labels"].items() %}
+{{ k }}: {{ v }}{% endfor %}
+{% endif -%}
+{%- if "annotations" in payload -%}
+Annotations:
+{%- for k, v in payload.get("annotations", {}).items() %}
+{#- render annotation as markdown url if it starts with http #}
+{{ k }}: {{ v }}
+{% endfor %}
+{%- endif -%}
+"""
+
+telegram_image_url = slack_image_url
+
+source_link = """\
+{{ payload.get("ruleUrl", "") or payload.generatorURL }}
+"""
+
+grouping_id = """\
+{{ payload.get("ruleName", "") or payload.get("labels", {}).get("alertname", "No title (check Web Title Template)") }}
+"""
+
+resolve_condition = """\
+{{ payload.get("state") == "ok" or payload.get("status", "") == "resolved" }}
+"""
+
+acknowledge_condition = None
+
+group_verbose_name = """\
+{{ payload.get("ruleName", "Incident") }}
+"""
+
+tests = {
+ "payload": {
+ "endsAt": "0001-01-01T00:00:00Z",
+ "labels": {
+ "job": "kube-state-metrics",
+ "instance": "10.143.139.7:8443",
+ "job_name": "email-tracking-perform-initialization-1.0.50",
+ "severity": "warning",
+ "alertname": "KubeJobCompletion",
+ "namespace": "default",
+ "prometheus": "monitoring/k8s",
+ },
+ "status": "firing",
+ "startsAt": "2019-12-13T08:57:35.095800493Z",
+ "annotations": {
+ "message": "Job default/email-tracking-perform-initialization-1.0.50 is taking more than one hour to complete.",
+ "runbook_url": "https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubejobcompletion",
+ },
+ "generatorURL": (
+ "https://localhost/prometheus/graph?g0.expr=kube_job_spec_completions%7Bjob%3D%22kube-state-metrics%22%7D"
+ "+-+kube_job_status_succeeded%7Bjob%3D%22kube-state-metrics%22%7D+%3E+0&g0.tab=1"
+ ),
+ },
+ "slack": {
+ "title": (
+ "*<{web_link}|#1 KubeJobCompletion>* via {integration_name} "
+ "(*<"
+ "https://localhost/prometheus/graph?g0.expr=kube_job_spec_completions%7Bjob%3D%22kube-state-metrics%22%7D"
+ "+-+kube_job_status_succeeded%7Bjob%3D%22kube-state-metrics%22%7D+%3E+0&g0.tab=1"
+ "|source>*)"
+ ),
+ "message": (
+ "*Status*: firing\n"
+ "*Labels:* \n"
+ "job: kube-state-metrics\n"
+ "instance: 10.143.139.7:8443\n"
+ "job_name: email-tracking-perform-initialization-1.0.50\n"
+ "severity: warning\n"
+ "alertname: KubeJobCompletion\n"
+ "namespace: default\n"
+ "prometheus: monitoring/k8s\n"
+ "*Annotations:*\n"
+ "message: Job default/email-tracking-perform-initialization-1.0.50 is taking more than one hour to complete. \n"
+ "runbook_url: "
+ ),
+ "image_url": None,
+ },
+ "web": {
+ "title": "KubeJobCompletion",
+ "message": (
+ ""
+ "Status : firing "
+ "Labels: "
+ "job : kube-state-metrics "
+ "instance : 10.143.139.7:8443 "
+ "job_name : email-tracking-perform-initialization-1.0.50 "
+ "severity : warningalertname : KubeJobCompletion "
+ "namespace : defaultprometheus : monitoring/k8s "
+ "Annotations: "
+ "message : Job default/email-tracking-perform-initialization-1.0.50 is taking more than one hour to complete. "
+ "runbook_url : "
+ 'here '
+ "
"
+ ),
+ "image_url": None,
+ },
+ "sms": {
+ "title": "KubeJobCompletion",
+ },
+ "phone_call": {
+ "title": "KubeJobCompletion",
+ },
+ "email": {
+ "title": "KubeJobCompletion",
+ "message": (
+ "**Status**: firing\n"
+ "**Labels:** \n"
+ "job: kube-state-metrics\n"
+ "instance: 10.143.139.7:8443\n"
+ "job_name: email-tracking-perform-initialization-1.0.50\n"
+ "severity: warning\n"
+ "alertname: KubeJobCompletion\n"
+ "namespace: default\n"
+ "prometheus: monitoring/k8s\n"
+ "**Annotations:**\n"
+ "message: Job default/email-tracking-perform-initialization-1.0.50 is taking more than one hour to complete.\n\n"
+ "runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubejobcompletion\n"
+ ),
+ },
+ "telegram": {
+ "title": "KubeJobCompletion",
+ "message": (
+ "Status : firing\n"
+ "Labels: \n"
+ "job: kube-state-metrics\n"
+ "instance: 10.143.139.7:8443\n"
+ "job_name: email-tracking-perform-initialization-1.0.50\n"
+ "severity: warning\n"
+ "alertname: KubeJobCompletion\n"
+ "namespace: default\n"
+ "prometheus: monitoring/k8s\n"
+ "Annotations: \n"
+ "message: Job default/email-tracking-perform-initialization-1.0.50 is taking more than one hour to complete.\n\n"
+ "runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubejobcompletion\n"
+ ),
+ "image_url": None,
+ },
+ "group_distinction": "c6bf5494a2d3052459b4dac837e41455",
+ "is_resolve_signal": False,
+ "is_acknowledge_signal": False,
+ "group_verbose_name": "Incident",
+}
+
+# Miscellaneous
+example_payload = {
+ "receiver": "amixr",
+ "status": "firing",
+ "alerts": [
+ {
+ "status": "firing",
+ "labels": {
+ "alertname": "TestAlert",
+ "region": "eu-1",
+ },
+ "annotations": {"description": "This alert was sent by user for the demonstration purposes"},
+ "startsAt": "2018-12-25T15:47:47.377363608Z",
+ "endsAt": "0001-01-01T00:00:00Z",
+ "generatorURL": "",
+ "amixr_demo": True,
+ }
+ ],
+ "groupLabels": {},
+ "commonLabels": {},
+ "commonAnnotations": {},
+ "externalURL": "http://f1d1ef51d710:9093",
+ "version": "4",
+ "groupKey": "{}:{}",
+}
diff --git a/engine/apps/integrations/metadata/configuration/grafana_alerting.py b/engine/apps/integrations/metadata/configuration/grafana_alerting.py
new file mode 100644
index 0000000000..ae07e12e46
--- /dev/null
+++ b/engine/apps/integrations/metadata/configuration/grafana_alerting.py
@@ -0,0 +1,256 @@
+# Main
+enabled = True
+title = "Grafana Alerting"
+slug = "grafana_alerting"
+short_description = (
+ "Your current Grafana Cloud stack. Automatically create an alerting contact point and a route in Grafana"
+)
+is_displayed_on_web = True
+is_featured = True
+is_able_to_autoresolve = True
+is_demo_alert_enabled = True
+
+description = """ \
+Alerts from Grafana Alertmanager are automatically routed to this integration."
+{% for dict_item in grafana_alerting_entities %}
+ Click here
+ to open contact point, and
+ here
+ to open routes for {{dict_item.alertmanager_name}} Alertmanager.
+{% endfor %}
+{% if not is_finished_alerting_setup %}
+ Creating contact points and routes for other alertmanagers...
+{% endif %}
+"""
+
+# Default templates
+slack_title = """\
+{# Usually title is located in payload.labels.alertname #}
+{% set title = payload.get("labels", {}).get("alertname", "No title (check Web Title Template)") %}
+{# Combine the title from different built-in variables into slack-formatted url #}
+*<{{ grafana_oncall_link }}|#{{ grafana_oncall_incident_id }} {{ title }}>* via {{ integration_name }}
+{% if source_link %}
+ (*<{{ source_link }}|source>*)
+{%- endif %}
+"""
+
+slack_message = """\
+{{- payload.message }}
+{%- if "status" in payload -%}
+*Status*: {{ payload.status }}
+{% endif -%}
+*Labels:* {% for k, v in payload["labels"].items() %}
+{{ k }}: {{ v }}{% endfor %}
+*Annotations:*
+{%- for k, v in payload.get("annotations", {}).items() %}
+{#- render annotation as slack markdown url if it starts with http #}
+{{ k }}: {% if v.startswith("http") %} <{{v}}|here> {% else %} {{v}} {% endif -%}
+{% endfor %}
+""" # noqa:W291
+
+
+slack_image_url = None
+
+web_title = """\
+{# Usually title is located in payload.labels.alertname #}
+{{- payload.get("labels", {}).get("alertname", "No title (check Web Title Template)") }}
+"""
+
+web_message = """\
+{{- payload.message }}
+{%- if "status" in payload %}
+**Status**: {{ payload.status }}
+{% endif -%}
+**Labels:** {% for k, v in payload["labels"].items() %}
+*{{ k }}*: {{ v }}{% endfor %}
+**Annotations:**
+{%- for k, v in payload.get("annotations", {}).items() %}
+{#- render annotation as markdown url if it starts with http #}
+*{{ k }}*: {% if v.startswith("http") %} [here]({{v}}){% else %} {{v}} {% endif -%}
+{% endfor %}
+""" # noqa:W291
+
+
+web_image_url = slack_image_url
+
+sms_title = '{{ payload.get("labels", {}).get("alertname", "Title undefined") }}'
+phone_call_title = sms_title
+
+email_title = web_title
+
+email_message = """\
+{{- payload.message }}
+{%- if "status" in payload -%}
+**Status**: {{ payload.status }}
+{% endif -%}
+**Labels:** {% for k, v in payload["labels"].items() %}
+{{ k }}: {{ v }}{% endfor %}
+**Annotations:**
+{%- for k, v in payload.get("annotations", {}).items() %}
+{#- render annotation as markdown url if it starts with http #}
+{{ k }}: {{v}}
+{% endfor %}
+""" # noqa:W291
+
+telegram_title = sms_title
+
+telegram_message = """\
+{{- payload.message }}
+{%- if "status" in payload -%}
+Status : {{ payload.status }}
+{% endif -%}
+Labels: {% for k, v in payload["labels"].items() %}
+{{ k }}: {{ v }}{% endfor %}
+Annotations:
+{%- for k, v in payload.get("annotations", {}).items() %}
+{#- render annotation as markdown url if it starts with http #}
+{{ k }}: {{ v }}
+{% endfor %}""" # noqa:W291
+
+telegram_image_url = slack_image_url
+
+source_link = "{{ payload.generatorURL }}"
+
+grouping_id = web_title
+
+resolve_condition = """\
+{{ payload.get("status", "") == "resolved" }}
+"""
+
+acknowledge_condition = None
+
+group_verbose_name = "Incident"
+
+tests = {
+ "payload": {
+ "endsAt": "0001-01-01T00:00:00Z",
+ "labels": {
+ "job": "kube-state-metrics",
+ "instance": "10.143.139.7:8443",
+ "job_name": "email-tracking-perform-initialization-1.0.50",
+ "severity": "warning",
+ "alertname": "KubeJobCompletion",
+ "namespace": "default",
+ "prometheus": "monitoring/k8s",
+ },
+ "status": "firing",
+ "startsAt": "2019-12-13T08:57:35.095800493Z",
+ "annotations": {
+ "message": "Job default/email-tracking-perform-initialization-1.0.50 is taking more than one hour to complete.",
+ "runbook_url": "https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubejobcompletion",
+ },
+ "generatorURL": (
+ "https://localhost/prometheus/graph?g0.expr=kube_job_spec_completions%7Bjob%3D%22kube-state-metrics%22%7D+-+"
+ "kube_job_status_succeeded%7Bjob%3D%22kube-state-metrics%22%7D+%3E+0&g0.tab=1"
+ ),
+ },
+ "slack": {
+ "title": (
+ "*<{web_link}|#1 KubeJobCompletion>* via {integration_name} (*<"
+ "https://localhost/prometheus/graph?g0.expr=kube_job_spec_completions%7Bjob%3D%22kube-state-metrics%22%7D+-+"
+ "kube_job_status_succeeded%7Bjob%3D%22kube-state-metrics%22%7D+%3E+0&g0.tab=1"
+ "|source>*)"
+ ),
+ "message": (
+ "*Status*: firing\n"
+ "*Labels:* \n"
+ "job: kube-state-metrics\n"
+ "instance: 10.143.139.7:8443\n"
+ "job_name: email-tracking-perform-initialization-1.0.50\n"
+ "severity: warning\n"
+ "alertname: KubeJobCompletion\n"
+ "namespace: default\n"
+ "prometheus: monitoring/k8s\n"
+ "*Annotations:*\n"
+ "message: Job default/email-tracking-perform-initialization-1.0.50 is taking more than one hour to complete. \n"
+ "runbook_url: "
+ ),
+ "image_url": None,
+ },
+ "web": {
+ "title": "KubeJobCompletion",
+ "message": (
+ ""
+ "Status : firing "
+ "Labels: "
+ "job : kube-state-metrics "
+ "instance : 10.143.139.7:8443 "
+ "job_name : email-tracking-perform-initialization-1.0.50 "
+ "severity : warningalertname : KubeJobCompletion "
+ "namespace : defaultprometheus : monitoring/k8s "
+ "Annotations: "
+ "message : Job default/email-tracking-perform-initialization-1.0.50 is taking more than one hour to complete. "
+ "runbook_url : "
+ 'here '
+ "
"
+ ),
+ "image_url": None,
+ },
+ "sms": {
+ "title": "KubeJobCompletion",
+ },
+ "phone_call": {
+ "title": "KubeJobCompletion",
+ },
+ "email": {
+ "title": "KubeJobCompletion",
+ "message": (
+ "**Status**: firing\n"
+ "**Labels:** \n"
+ "job: kube-state-metrics\n"
+ "instance: 10.143.139.7:8443\n"
+ "job_name: email-tracking-perform-initialization-1.0.50\n"
+ "severity: warning\n"
+ "alertname: KubeJobCompletion\n"
+ "namespace: default\n"
+ "prometheus: monitoring/k8s\n"
+ "**Annotations:**\n"
+ "message: Job default/email-tracking-perform-initialization-1.0.50 is taking more than one hour to complete.\n\n"
+ "runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubejobcompletion\n"
+ ),
+ },
+ "telegram": {
+ "title": "KubeJobCompletion",
+ "message": (
+ "Status : firing\n"
+ "Labels: \n"
+ "job: kube-state-metrics\n"
+ "instance: 10.143.139.7:8443\n"
+ "job_name: email-tracking-perform-initialization-1.0.50\n"
+ "severity: warning\n"
+ "alertname: KubeJobCompletion\n"
+ "namespace: default\n"
+ "prometheus: monitoring/k8s\n"
+ "Annotations: \n"
+ "message: Job default/email-tracking-perform-initialization-1.0.50 is taking more than one hour to complete.\n\n"
+ "runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubejobcompletion\n"
+ ),
+ "image_url": None,
+ },
+}
+
+example_payload = {
+ "receiver": "amixr",
+ "status": "firing",
+ "alerts": [
+ {
+ "status": "firing",
+ "labels": {
+ "alertname": "TestAlert",
+ "region": "eu-1",
+ },
+ "annotations": {"description": "This alert was sent by user for the demonstration purposes"},
+ "startsAt": "2018-12-25T15:47:47.377363608Z",
+ "endsAt": "0001-01-01T00:00:00Z",
+ "generatorURL": "",
+ "amixr_demo": True,
+ }
+ ],
+ "groupLabels": {},
+ "commonLabels": {},
+ "commonAnnotations": {},
+ "externalURL": "http://f1d1ef51d710:9093",
+ "version": "4",
+ "groupKey": "{}:{}",
+}
diff --git a/engine/apps/integrations/metadata/configuration/heartbeat.py b/engine/apps/integrations/metadata/configuration/heartbeat.py
new file mode 100644
index 0000000000..f051a44c5c
--- /dev/null
+++ b/engine/apps/integrations/metadata/configuration/heartbeat.py
@@ -0,0 +1,31 @@
+# Main
+enabled = True
+title = "Heartbeat"
+slug = "heartbeat"
+short_description = None
+description = None
+is_displayed_on_web = False
+is_featured = False
+is_able_to_autoresolve = True
+is_demo_alert_enabled = False
+
+# Default templates
+slack_title = """\
+*<{{ grafana_oncall_link }}|#{{ grafana_oncall_incident_id }} {{ payload.get("title", "Title undefined (check Slack Title Template)") }}>* via {{ integration_name }}
+{% if source_link %}
+ (*<{{ source_link }}|source>*)
+{%- endif %}"""
+
+grouping_id = """\
+{{ payload.get("id", "") }}{{ payload.get("user_defined_id", "") }}
+"""
+
+resolve_condition = '{{ payload.get("is_resolve", False) == True }}'
+
+acknowledge_condition = None
+
+group_verbose_name = '{{ payload.get("title", "Title") }}'
+
+example_payload = {"foo": "bar"}
diff --git a/engine/apps/integrations/metadata/configuration/inbound_email.py b/engine/apps/integrations/metadata/configuration/inbound_email.py
new file mode 100644
index 0000000000..b934e35a69
--- /dev/null
+++ b/engine/apps/integrations/metadata/configuration/inbound_email.py
@@ -0,0 +1,53 @@
+# Main
+enabled = True
+title = "Inboubd Email"
+slug = "inbound_email"
+short_description = None
+description = None
+is_displayed_on_web = False
+is_featured = False
+is_able_to_autoresolve = False
+is_demo_alert_enabled = False
+
+# Default templates
+slack_title = """\
+*<{{ grafana_oncall_link }}|#{{ grafana_oncall_incident_id }} {{ payload.get("title", "Title undefined (check Slack Title Template)") }}>* via {{ integration_name }}
+{% if source_link %}
+ (*<{{ source_link }}|source>*)
+{%- endif %}"""
+
+slack_message = "{{ payload.message }}"
+
+slack_image_url = "{{ payload.image_url }}"
+
+web_title = '{{ payload.get("title", "Title undefined (check Web Title Template)") }}'
+
+web_message = slack_message
+
+web_image_url = slack_image_url
+
+sms_title = web_title
+
+phone_call_title = web_title
+
+email_title = web_title
+
+email_message = slack_message
+
+telegram_title = sms_title
+
+telegram_message = slack_message
+
+telegram_image_url = slack_image_url
+
+source_link = "{{ payload.link_to_upstream_details }}"
+
+grouping_id = '{{ payload.get("title", "")}}'
+
+resolve_condition = '{{ payload.get("state", "").upper() == "OK" }}'
+
+acknowledge_condition = None
+
+group_verbose_name = web_title
diff --git a/engine/apps/integrations/metadata/configuration/maintenance.py b/engine/apps/integrations/metadata/configuration/maintenance.py
new file mode 100644
index 0000000000..957e53e9c5
--- /dev/null
+++ b/engine/apps/integrations/metadata/configuration/maintenance.py
@@ -0,0 +1,53 @@
+# Main
+enabled = True
+title = "Maintenance"
+slug = "maintenance"
+short_description = None
+description = None
+is_displayed_on_web = False
+is_featured = False
+is_able_to_autoresolve = False
+is_demo_alert_enabled = True
+
+# Default templates
+slack_title = """\
+*<{{ grafana_oncall_link }}|#{{ grafana_oncall_incident_id }} {{ payload.get("title", "Maintenance") }}>* via {{ integration_name }}
+{% if source_link %}
+ (*<{{ source_link }}|source>*)
+{%- endif %}"""
+
+slack_message = "{{ payload.message }}"
+
+slack_image_url = "{{ payload.image_url }}"
+
+web_title = '{{ payload.get("title", "Maintenance") }}'
+
+web_message = slack_message
+
+web_image_url = slack_image_url
+
+sms_title = web_title
+
+phone_call_title = sms_title
+
+email_title = web_title
+
+email_message = slack_message
+
+telegram_title = sms_title
+
+telegram_message = slack_message
+
+telegram_image_url = slack_image_url
+
+source_link = None
+
+grouping_id = None
+
+resolve_condition = None
+
+acknowledge_condition = None
+
+group_verbose_name = "Incident"
diff --git a/engine/apps/integrations/metadata/configuration/manual.py b/engine/apps/integrations/metadata/configuration/manual.py
new file mode 100644
index 0000000000..bf1825deff
--- /dev/null
+++ b/engine/apps/integrations/metadata/configuration/manual.py
@@ -0,0 +1,77 @@
+# Main
+enabled = True
+title = "Manual"
+slug = "manual"
+short_description = None
+description = None
+is_displayed_on_web = False
+is_featured = False
+is_able_to_autoresolve = False
+is_demo_alert_enabled = False
+
+# Default templates
+slack_title = """{% set metadata = payload.view.private_metadata %}
+{%- if "message" in metadata -%}
+{% set title = "Message from @" + metadata.author_username %}
+{%- else -%}
+{% set title = payload.view.state["values"].TITLE_INPUT.FinishCreateIncidentViewStep.value %}
+{%- endif -%}
+*<{{ grafana_oncall_link }}|#{{ grafana_oncall_incident_id }} {{ title }}>* via {{ integration_name }}
+{% if source_link %}
+ (*<{{ source_link }}|source>*)
+{%- endif %}
+"""
+
+slack_message = """{% set metadata = payload.view.private_metadata %}
+{% if "message" in metadata -%}
+{{ metadata.message.text }}
+
+
+{%- else -%}
+{{ payload.view.state["values"].MESSAGE_INPUT.FinishCreateIncidentViewStep.value }}
+
+created by {{ payload.user.name }}
+{%- endif -%}"""
+
+slack_image_url = None
+
+web_title = """{% set metadata = payload.view.private_metadata %}
+{%- if "message" in metadata -%}
+{{ "Message from @" + metadata.author_username }}
+{%- else -%}
+{{ payload.view.state["values"].TITLE_INPUT.FinishCreateIncidentViewStep.value }}
+{%- endif -%}"""
+
+web_message = slack_message
+
+web_image_url = slack_image_url
+
+sms_title = web_title
+
+phone_call_title = sms_title
+
+email_title = web_title
+
+email_message = slack_message
+
+telegram_title = sms_title
+
+telegram_message = slack_message
+
+telegram_image_url = slack_image_url
+
+source_link = """\
+{% set metadata = payload.view.private_metadata %}
+{%- if "message" in metadata %}
+https://{{ payload.team.domain }}.slack.com/archives/{{ payload.channel.id }}/{{ payload.message.ts }}
+{% endif -%}"""
+
+grouping_id = """{{ payload }}"""
+
+resolve_condition = None
+
+acknowledge_condition = None
+
+group_verbose_name = web_title
diff --git a/engine/apps/integrations/metadata/configuration/slack_channel.py b/engine/apps/integrations/metadata/configuration/slack_channel.py
new file mode 100644
index 0000000000..d01c186bc3
--- /dev/null
+++ b/engine/apps/integrations/metadata/configuration/slack_channel.py
@@ -0,0 +1,44 @@
+# Main
+enabled = True
+title = "Slack Channel"
+slug = "slack_channel"
+short_description = None
+description = None
+is_displayed_on_web = False
+is_featured = False
+is_able_to_autoresolve = False
+is_demo_alert_enabled = False
+
+# Default templates
+slack_title = """\
+{% if source_link -%}
+*<{{ source_link }}|<#{{ payload.get("channel", "") }}>>*
+{%- else -%}
+<#{{ payload.get("channel", "") }}>
+{%- endif %}"""
+
+web_title = """\
+{% if source_link -%}
+[#{{ grafana_oncall_incident_id }}]({{ source_link }}) <#{{ payload.get("channel", "") }}>
+{%- else -%}
+*#{{ grafana_oncall_incident_id }}* <#{{ payload.get("channel", "") }}>
+{%- endif %}"""
+
+telegram_title = """\
+{% if source_link -%}
+#{{ grafana_oncall_incident_id }} {{ payload.get("channel", "") }}
+{%- else -%}
+*#{{ grafana_oncall_incident_id }}* <#{{ payload.get("channel", "") }}>
+{%- endif %}"""
+
+grouping_id = '{{ payload.get("ts", "") }}'
+
+resolve_condition = None
+
+acknowledge_condition = None
+
+group_verbose_name = '<#{{ payload.get("channel", "") }}>'
+
+source_link = '{{ payload.get("amixr_mixin", {}).get("permalink", "")}}'
diff --git a/engine/apps/integrations/metadata/configuration/webhook.py b/engine/apps/integrations/metadata/configuration/webhook.py
new file mode 100644
index 0000000000..ea18fab7ca
--- /dev/null
+++ b/engine/apps/integrations/metadata/configuration/webhook.py
@@ -0,0 +1,65 @@
+# Main
+enabled = True
+title = "Webhook"
+slug = "webhook"
+short_description = None
+description = None
+is_featured = False
+is_displayed_on_web = True
+is_able_to_autoresolve = True
+is_demo_alert_enabled = True
+
+# Default templates
+slack_title = """\
+*<{{ grafana_oncall_link }}|#{{ grafana_oncall_incident_id }} Incident>* via {{ integration_name }}
+{% if source_link %}
+ (*<{{ source_link }}|source>*)
+{%- endif %}
+
+"""
+
+slack_message = "```{{ payload|tojson_pretty }}```"
+
+slack_image_url = None
+
+web_title = "Incident"
+
+web_message = """\
+```
+{{ payload|tojson_pretty }}
+```
+"""
+
+web_image_url = slack_image_url
+
+sms_title = web_title
+
+phone_call_title = sms_title
+
+email_title = web_title
+
+email_message = "{{ payload|tojson_pretty }}"
+
+telegram_title = sms_title
+
+telegram_message = "{{ payload|tojson_pretty }}
"
+
+telegram_image_url = slack_image_url
+
+source_link = "{{ payload.url }}"
+
+grouping_id = "{{ payload }}"
+
+resolve_condition = """\
+{%- if "is_amixr_heartbeat_restored" in payload -%}
+{# We don't know the payload format from your integration. #}
+{# The heartbeat alerts will go here so we check for our own key #}
+{{ payload["is_amixr_heartbeat_restored"] }}
+{%- else -%}
+{{ payload.get("state", "").upper() == "OK" }}'
+{%- endif %}"""
+acknowledge_condition = None
+
+group_verbose_name = web_title
+
+example_payload = {"message": "This alert was sent by user for the demonstration purposes"}
diff --git a/engine/apps/integrations/metadata/heartbeat/__init__.py b/engine/apps/integrations/metadata/heartbeat/__init__.py
new file mode 100644
index 0000000000..1076b3e60a
--- /dev/null
+++ b/engine/apps/integrations/metadata/heartbeat/__init__.py
@@ -0,0 +1,13 @@
+"""
+This module provides payloads for heartbeat alerts.
+Each file in this module corresponds to an integration for which heartbeat is available
+(unless the filename starts with an underscore).
+The filename MUST match the corresponding entry in INTEGRATION_TO_REVERSE_URL_MAP.
+"""
+
+import apps.integrations.metadata.heartbeat.alertmanager # noqa
+import apps.integrations.metadata.heartbeat.elastalert # noqa
+import apps.integrations.metadata.heartbeat.formatted_webhook # noqa
+import apps.integrations.metadata.heartbeat.grafana # noqa
+import apps.integrations.metadata.heartbeat.prtg # noqa
+import apps.integrations.metadata.heartbeat.webhook # noqa
+import apps.integrations.metadata.heartbeat.zabbix # noqa
diff --git a/engine/apps/integrations/metadata/heartbeat/_heartbeat_text_creator.py b/engine/apps/integrations/metadata/heartbeat/_heartbeat_text_creator.py
new file mode 100644
index 0000000000..8e0e030c5f
--- /dev/null
+++ b/engine/apps/integrations/metadata/heartbeat/_heartbeat_text_creator.py
@@ -0,0 +1,66 @@
+from dataclasses import dataclass
+from urllib.parse import urljoin
+
+from django.conf import settings
+
+
+@dataclass
+class IntegrationHeartBeatText:
+ heartbeat_expired_title: str = "heartbeat_expired"
+ heartbeat_expired_message: str = "heartbeat_expired"
+    heartbeat_restored_title: str = "heartbeat_restored"
+ heartbeat_restored_message: str = "heartbeat_restored"
+ heartbeat_instruction_template: str = None
+
+
+class HearBeatTextCreator:
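+    """
+    Builds the default titles and messages for an integration's heartbeat expired and restored alerts.
+    """
+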
+ def __init__(self, integration_verbal):
+ self.integration_verbal = integration_verbal.capitalize()
+
+ def get_heartbeat_texts(self):
+ return IntegrationHeartBeatText(
+ heartbeat_expired_title=self._get_heartbeat_expired_title(),
+ heartbeat_expired_message=self._get_heartbeat_expired_message(),
+ heartbeat_restored_title=self._get_heartbeat_restored_title(),
+ heartbeat_restored_message=self._get_heartbeat_restored_message(),
+ heartbeat_instruction_template=self._get_heartbeat_instruction_template(),
+ )
+
+ def _get_heartbeat_expired_title(self):
+ heartbeat_expired_title = f"{self.integration_verbal} heartbeat is missing"
+ return heartbeat_expired_title
+
+ def _get_heartbeat_expired_message(self):
+ heartbeat_docs_url = urljoin(settings.DOCS_URL, "/#/integrations/heartbeat")
+ heartbeat_expired_message = (
+ f"Amixr was waiting for a heartbeat from {self.integration_verbal}. "
+ f"Heartbeat is missing. That could happen because {self.integration_verbal} stopped or"
+ f" there are connectivity issues between Amixr and {self.integration_verbal}. "
+ f"Read more in Amixr docs: {heartbeat_docs_url}"
+ )
+ return heartbeat_expired_message
+
+    def _get_heartbeat_restored_title(self):
+        heartbeat_restored_title = f"{self.integration_verbal} heartbeat restored"
+        return heartbeat_restored_title
+
+    def _get_heartbeat_restored_message(self):
+        heartbeat_restored_message = f"Amixr received a signal from {self.integration_verbal}. Heartbeat restored."
+        return heartbeat_restored_message
+
+ def _get_heartbeat_instruction_template(self):
+ return f"heartbeat_instructions/{self.integration_verbal.lower()}.html"
+
+
+class HearBeatTextCreatorForTitleGrouping(HearBeatTextCreator):
+ """
+    Some integrations (Grafana, AlertManager) have a default grouping template based on the title
+ """
+
+ def _get_heartbeat_expired_title(self):
+ heartbeat_expired_title = "Amixr heartbeat"
+ return heartbeat_expired_title
+
+ def _get_heartbeat_restored_title(self):
+        heartbeat_restored_title = "Amixr heartbeat"
+        return heartbeat_restored_title
diff --git a/engine/apps/integrations/metadata/heartbeat/alertmanager.py b/engine/apps/integrations/metadata/heartbeat/alertmanager.py
new file mode 100644
index 0000000000..c0979787ea
--- /dev/null
+++ b/engine/apps/integrations/metadata/heartbeat/alertmanager.py
@@ -0,0 +1,35 @@
+from pathlib import PurePath
+
+from apps.integrations.metadata.heartbeat._heartbeat_text_creator import HearBeatTextCreatorForTitleGrouping
+
+integration_verbal = PurePath(__file__).stem
+creator = HearBeatTextCreatorForTitleGrouping(integration_verbal)
+heartbeat_text = creator.get_heartbeat_texts()
+
+heartbeat_instruction_template = heartbeat_text.heartbeat_instruction_template
+
+heartbeat_expired_title = heartbeat_text.heartbeat_expired_title
+heartbeat_expired_message = heartbeat_text.heartbeat_expired_message
+
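+# Synthetic Alertmanager-style payload sent through the integration when the heartbeat
+# expires; the "status" field feeds the integration's resolve_condition template.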
+heartbeat_expired_payload = {
+ "endsAt": "",
+ "labels": {"alertname": heartbeat_expired_title},
+ "status": "firing",
+ "startsAt": "",
+ "annotations": {
+ "message": heartbeat_expired_message,
+ },
+ "generatorURL": None,
+}
+
+heartbeat_restored_title = heartbeat_text.heartbeat_restored_title
+heartbeat_restored_message = heartbeat_text.heartbeat_restored_message
+
+heartbeat_restored_payload = {
+ "endsAt": "",
+ "labels": {"alertname": heartbeat_restored_title},
+ "status": "resolved",
+ "startsAt": "",
+ "annotations": {"message": heartbeat_restored_message},
+ "generatorURL": None,
+}
diff --git a/engine/apps/integrations/metadata/heartbeat/elastalert.py b/engine/apps/integrations/metadata/heartbeat/elastalert.py
new file mode 100644
index 0000000000..7e76f8fb3e
--- /dev/null
+++ b/engine/apps/integrations/metadata/heartbeat/elastalert.py
@@ -0,0 +1,37 @@
+from pathlib import PurePath
+
+from apps.integrations.metadata.heartbeat._heartbeat_text_creator import HearBeatTextCreator
+
+integration_verbal = PurePath(__file__).stem
+creator = HearBeatTextCreator(integration_verbal)
+heartbeat_text = creator.get_heartbeat_texts()
+
+heartbeat_instruction_template = heartbeat_text.heartbeat_instruction_template
+
+heartbeat_expired_title = heartbeat_text.heartbeat_expired_title
+heartbeat_expired_message = heartbeat_text.heartbeat_expired_message
+
+heartbeat_expired_payload = {
+ "alert_uid": "0eaf37c8-e1eb-4714-b79e-7c648b6a96fa",
+ "title": heartbeat_expired_title,
+ "image_url": None,
+ "state": "alerting",
+ "link_to_upstream_details": None,
+ "message": heartbeat_expired_message,
+ "is_amixr_heartbeat": True,
+ "is_amixr_heartbeat_restored": False,
+}
+
+heartbeat_restored_title = heartbeat_text.heartbeat_restored_title
+heartbeat_restored_message = heartbeat_text.heartbeat_restored_message
+
+heartbeat_restored_payload = {
+ "alert_uid": "0eaf37c8-e1eb-4714-b79e-7c648b6a96fa",
+ "title": heartbeat_restored_title,
+ "image_url": None,
+ "state": "ok",
+ "link_to_upstream_details": None,
+ "message": heartbeat_restored_message,
+ "is_amixr_heartbeat": True,
+ "is_amixr_heartbeat_restored": True,
+}
diff --git a/engine/apps/integrations/metadata/heartbeat/formatted_webhook.py b/engine/apps/integrations/metadata/heartbeat/formatted_webhook.py
new file mode 100644
index 0000000000..dc0beea624
--- /dev/null
+++ b/engine/apps/integrations/metadata/heartbeat/formatted_webhook.py
@@ -0,0 +1,37 @@
+from pathlib import PurePath
+
+from apps.integrations.metadata.heartbeat._heartbeat_text_creator import HearBeatTextCreator
+
+integration_verbal = PurePath(__file__).stem
+creator = HearBeatTextCreator(integration_verbal)
+heartbeat_text = creator.get_heartbeat_texts()
+
+heartbeat_instruction_template = heartbeat_text.heartbeat_instruction_template
+
+heartbeat_expired_title = heartbeat_text.heartbeat_expired_title
+heartbeat_expired_message = heartbeat_text.heartbeat_expired_message
+
+heartbeat_expired_payload = {
+ "alert_uid": "fbdad422-b27d-454a-8553-84d1517e0005",
+ "title": heartbeat_expired_title,
+ "image_url": None,
+ "state": "alerting",
+ "link_to_upstream_details": None,
+ "message": heartbeat_expired_message,
+ "is_amixr_heartbeat": True,
+ "is_amixr_heartbeat_restored": False,
+}
+
+heartbeat_restored_title = heartbeat_text.heartbeat_restored_title
+heartbeat_restored_message = heartbeat_text.heartbeat_restored_message
+
+heartbeat_restored_payload = {
+ "alert_uid": "fbdad422-b27d-454a-8553-84d1517e0005",
+ "title": heartbeat_restored_title,
+ "image_url": None,
+ "state": "ok",
+ "link_to_upstream_details": None,
+ "message": heartbeat_restored_message,
+ "is_amixr_heartbeat": True,
+ "is_amixr_heartbeat_restored": True,
+}
diff --git a/engine/apps/integrations/metadata/heartbeat/grafana.py b/engine/apps/integrations/metadata/heartbeat/grafana.py
new file mode 100644
index 0000000000..05abb8386e
--- /dev/null
+++ b/engine/apps/integrations/metadata/heartbeat/grafana.py
@@ -0,0 +1,31 @@
+from pathlib import PurePath
+
+from apps.integrations.metadata.heartbeat._heartbeat_text_creator import HearBeatTextCreatorForTitleGrouping
+
+integration_verbal = PurePath(__file__).stem
+creator = HearBeatTextCreatorForTitleGrouping(integration_verbal)
+heartbeat_text = creator.get_heartbeat_texts()
+
+heartbeat_instruction_template = heartbeat_text.heartbeat_instruction_template
+
+heartbeat_expired_title = heartbeat_text.heartbeat_expired_title
+heartbeat_expired_message = heartbeat_text.heartbeat_expired_message
+
+heartbeat_expired_payload = {
+ "state": "alerting",
+ "title": heartbeat_expired_title,
+ "message": heartbeat_expired_message,
+ "is_amixr_heartbeat": True,
+ "is_amixr_heartbeat_restored": False,
+}
+
+heartbeat_restored_title = f"[OK] {heartbeat_text.heartbeat_restored_title}"
+heartbeat_restored_message = heartbeat_text.heartbeat_restored_message
+
+heartbeat_restored_payload = {
+ "state": "ok",
+ "title": heartbeat_restored_title,
+ "message": heartbeat_restored_message,
+ "is_amixr_heartbeat": True,
+ "is_amixr_heartbeat_restored": True,
+}
diff --git a/engine/apps/integrations/metadata/heartbeat/prtg.py b/engine/apps/integrations/metadata/heartbeat/prtg.py
new file mode 100644
index 0000000000..21c8d80ce6
--- /dev/null
+++ b/engine/apps/integrations/metadata/heartbeat/prtg.py
@@ -0,0 +1,37 @@
+from pathlib import PurePath
+
+from apps.integrations.metadata.heartbeat._heartbeat_text_creator import HearBeatTextCreator
+
+integration_verbal = PurePath(__file__).stem
+creator = HearBeatTextCreator(integration_verbal)
+heartbeat_text = creator.get_heartbeat_texts()
+
+heartbeat_instruction_template = heartbeat_text.heartbeat_instruction_template
+
+heartbeat_expired_title = heartbeat_text.heartbeat_expired_title
+heartbeat_expired_message = heartbeat_text.heartbeat_expired_message
+
+heartbeat_expired_payload = {
+ "alert_uid": "5498ad08-f094-4dff-888e-51f8cac76eab",
+ "title": heartbeat_expired_title,
+ "image_url": None,
+ "state": "alerting",
+ "link_to_upstream_details": None,
+ "message": heartbeat_expired_message,
+ "is_amixr_heartbeat": True,
+ "is_amixr_heartbeat_restored": False,
+}
+
+heartbeat_restored_title = heartbeat_text.heartbeat_restored_title
+heartbeat_restored_message = heartbeat_text.heartbeat_restored_message
+
+heartbeat_restored_payload = {
+ "alert_uid": "5498ad08-f094-4dff-888e-51f8cac76eab",
+ "title": heartbeat_restored_title,
+ "image_url": None,
+ "state": "ok",
+ "link_to_upstream_details": None,
+ "message": heartbeat_restored_message,
+ "is_amixr_heartbeat": True,
+ "is_amixr_heartbeat_restored": True,
+}
diff --git a/engine/apps/integrations/metadata/heartbeat/webhook.py b/engine/apps/integrations/metadata/heartbeat/webhook.py
new file mode 100644
index 0000000000..510b30faa9
--- /dev/null
+++ b/engine/apps/integrations/metadata/heartbeat/webhook.py
@@ -0,0 +1,38 @@
+from pathlib import PurePath
+
+from apps.integrations.metadata.heartbeat._heartbeat_text_creator import HearBeatTextCreator
+
+integration_verbal = PurePath(__file__).stem
+creator = HearBeatTextCreator(integration_verbal)
+heartbeat_text = creator.get_heartbeat_texts()
+
+heartbeat_instruction_template = heartbeat_text.heartbeat_instruction_template
+
+heartbeat_expired_title = heartbeat_text.heartbeat_expired_title
+heartbeat_expired_message = heartbeat_text.heartbeat_expired_message
+
+
+heartbeat_expired_payload = {
+ "alert_uid": "7973c835-ff3f-46e4-9444-06df127b6f8e",
+ "title": heartbeat_expired_title,
+ "image_url": None,
+ "state": "alerting",
+ "link_to_upstream_details": None,
+ "message": heartbeat_expired_message,
+ "is_amixr_heartbeat": True,
+ "is_amixr_heartbeat_restored": False,
+}
+
+heartbeat_restored_title = heartbeat_text.heartbeat_restored_title
+heartbeat_restored_message = heartbeat_text.heartbeat_restored_message
+
+heartbeat_restored_payload = {
+ "alert_uid": "7973c835-ff3f-46e4-9444-06df127b6f8e",
+ "title": heartbeat_restored_title,
+ "image_url": None,
+ "state": "ok",
+ "link_to_upstream_details": None,
+ "message": heartbeat_restored_message,
+ "is_amixr_heartbeat": True,
+ "is_amixr_heartbeat_restored": True,
+}
diff --git a/engine/apps/integrations/metadata/heartbeat/zabbix.py b/engine/apps/integrations/metadata/heartbeat/zabbix.py
new file mode 100644
index 0000000000..add62bafb0
--- /dev/null
+++ b/engine/apps/integrations/metadata/heartbeat/zabbix.py
@@ -0,0 +1,37 @@
+from pathlib import PurePath
+
+from apps.integrations.metadata.heartbeat._heartbeat_text_creator import HearBeatTextCreator
+
+integration_verbal = PurePath(__file__).stem
+creator = HearBeatTextCreator(integration_verbal)
+heartbeat_text = creator.get_heartbeat_texts()
+
+heartbeat_instruction_template = heartbeat_text.heartbeat_instruction_template
+
+heartbeat_expired_title = heartbeat_text.heartbeat_expired_title
+heartbeat_expired_message = heartbeat_text.heartbeat_expired_message
+
+heartbeat_expired_payload = {
+ "alert_uid": "191eac7a-d7c1-43a0-8821-eef5afe41367",
+ "title": heartbeat_expired_title,
+ "image_url": None,
+ "state": "alerting",
+ "link_to_upstream_details": None,
+ "message": heartbeat_expired_message,
+ "is_amixr_heartbeat": True,
+ "is_amixr_heartbeat_restored": False,
+}
+
+heartbeat_restored_title = heartbeat_text.heartbeat_restored_title
+heartbeat_restored_message = heartbeat_text.heartbeat_restored_message
+
+heartbeat_restored_payload = {
+ "alert_uid": "191eac7a-d7c1-43a0-8821-eef5afe41367",
+ "title": heartbeat_restored_title,
+ "image_url": None,
+ "state": "ok",
+ "link_to_upstream_details": None,
+ "message": heartbeat_restored_message,
+ "is_amixr_heartbeat": True,
+ "is_amixr_heartbeat_restored": True,
+}
diff --git a/engine/apps/integrations/mixins/__init__.py b/engine/apps/integrations/mixins/__init__.py
new file mode 100644
index 0000000000..f34c1d41d5
--- /dev/null
+++ b/engine/apps/integrations/mixins/__init__.py
@@ -0,0 +1,7 @@
+from .alert_channel_defining_mixin import AlertChannelDefiningMixin # noqa: F401
+from .browsable_instruction_mixin import BrowsableInstructionMixin # noqa: F401
+from .ratelimit_mixin import ( # noqa: F401
+ IntegrationHeartBeatRateLimitMixin,
+ IntegrationRateLimitMixin,
+ is_ratelimit_ignored,
+)
diff --git a/engine/apps/integrations/mixins/alert_channel_defining_mixin.py b/engine/apps/integrations/mixins/alert_channel_defining_mixin.py
new file mode 100644
index 0000000000..3e1cc25709
--- /dev/null
+++ b/engine/apps/integrations/mixins/alert_channel_defining_mixin.py
@@ -0,0 +1,81 @@
+import logging
+from time import perf_counter
+
+from django.apps import apps
+from django.core import serializers
+from django.core.cache import cache
+from django.core.exceptions import PermissionDenied
+from django.db import OperationalError
+
+logger = logging.getLogger(__name__)
+
+
+class AlertChannelDefiningMixin(object):
+ """
+    Resolves the "alert channel" used for this request and attaches it to the request,
+    making it easy to access in ViewSets.
+ """
+
+ CACHE_KEY_DB_FALLBACK = "cached_alert_receive_channels_db_fallback" # Key for caching channels as a DB fallback
+ CACHE_DB_FALLBACK_OBSOLETE_KEY = CACHE_KEY_DB_FALLBACK + "_obsolete_key" # Used as a timer for re-caching
+ CACHE_DB_FALLBACK_REFRESH_INTERVAL = 180
+
+ CACHE_KEY_SHORT_TERM = "cached_alert_receive_channels_short_term" # Key for caching channels to reduce DB load
+ CACHE_SHORT_TERM_TIMEOUT = 5
+
+ def dispatch(self, *args, **kwargs):
+ AlertReceiveChannel = apps.get_model("alerts", "AlertReceiveChannel")
+ logger.info("AlertChannelDefiningMixin started")
+ start = perf_counter()
+ alert_receive_channel = None
+ try:
+ # Trying to define from short-term cache
+ cache_key_short_term = self.CACHE_KEY_SHORT_TERM + "_" + str(kwargs["alert_channel_key"])
+ cached_alert_receive_channel_raw = cache.get(cache_key_short_term)
+ if cached_alert_receive_channel_raw is not None:
+ alert_receive_channel = next(serializers.deserialize("json", cached_alert_receive_channel_raw)).object
+
+ if alert_receive_channel is None:
+ # Trying to define channel from DB
+ alert_receive_channel = AlertReceiveChannel.objects.get(token=kwargs["alert_channel_key"])
+ # Update short term cache
+ serialized = serializers.serialize("json", [alert_receive_channel])
+ cache.set(cache_key_short_term, serialized, self.CACHE_SHORT_TERM_TIMEOUT)
+
+ # Update cached channels
+ if cache.get(self.CACHE_DB_FALLBACK_OBSOLETE_KEY) is None:
+ cache.set(self.CACHE_DB_FALLBACK_OBSOLETE_KEY, True, self.CACHE_DB_FALLBACK_REFRESH_INTERVAL)
+ self.update_alert_receive_channel_cache()
+ except AlertReceiveChannel.DoesNotExist:
+ raise PermissionDenied("Integration key was not found. Permission denied.")
+ except OperationalError:
+ logger.info("Cannot connect to database, using cache to consume alerts!")
+
+ # Searching for a channel in a cache
+ if cache.get(self.CACHE_KEY_DB_FALLBACK):
+ for obj in serializers.deserialize("json", cache.get(self.CACHE_KEY_DB_FALLBACK)):
+ if obj.object.token == kwargs["alert_channel_key"]:
+ alert_receive_channel = obj.object
+
+ if alert_receive_channel is None:
+ raise PermissionDenied("Integration key was not found in cache. Permission denied.")
+
+ else:
+ logger.info("Cache is empty!")
+ raise
+
+ del kwargs["alert_channel_key"]
+ kwargs["alert_receive_channel"] = alert_receive_channel
+
+ request = args[0]
+ request.alert_receive_channel = alert_receive_channel
+ finish = perf_counter()
+ logger.info(f"AlertChannelDefiningMixin finished in {finish - start}")
+ return super(AlertChannelDefiningMixin, self).dispatch(*args, **kwargs)
+
+ def update_alert_receive_channel_cache(self):
+ AlertReceiveChannel = apps.get_model("alerts", "AlertReceiveChannel")
+ logger.info("Caching alert receive channels from database.")
+ serialized = serializers.serialize("json", AlertReceiveChannel.objects.all())
+ # Caching forever, re-caching is managed by "obsolete key"
+ cache.set(self.CACHE_KEY_DB_FALLBACK, serialized, timeout=None)
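
For orientation, a minimal sketch of how a view might compose this mixin; the view class and response body are hypothetical:

# Sketch only: ExampleAlertView is hypothetical, not part of the patch.
from django.http import JsonResponse
from django.views import View

from apps.integrations.mixins import AlertChannelDefiningMixin

class ExampleAlertView(AlertChannelDefiningMixin, View):
    def post(self, request, alert_receive_channel, *args, **kwargs):
        # dispatch() above has already swapped the `alert_channel_key` URL kwarg
        # for the resolved `alert_receive_channel` instance.
        return JsonResponse({"integration": alert_receive_channel.verbal_name})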
diff --git a/engine/apps/integrations/mixins/browsable_instruction_mixin.py b/engine/apps/integrations/mixins/browsable_instruction_mixin.py
new file mode 100644
index 0000000000..823303befd
--- /dev/null
+++ b/engine/apps/integrations/mixins/browsable_instruction_mixin.py
@@ -0,0 +1,35 @@
+import json
+from urllib.parse import urljoin
+
+from django.conf import settings
+from django.http import HttpResponse
+from django.template import loader
+
+
+class BrowsableInstructionMixin:
+ def get(self, request, alert_receive_channel, *args, **kwargs):
+ template = loader.get_template("integration_link.html")
+ # TODO Create associative array for integrations
+ base_integration_docs_url = urljoin(settings.DOCS_URL, "/#/integrations/")
+ docs_url = f'{base_integration_docs_url}{request.get_full_path().split("/")[3]}'
+ show_button = True
+ if request.get_full_path().split("/")[3] == "amazon_sns":
+ show_button = False
+ source = " ".join(map(lambda x: x.capitalize(), request.get_full_path().split("/")[3].split("_")))
+ if alert_receive_channel.config.example_payload:
+ payload = alert_receive_channel.config.example_payload
+ payload = json.dumps(payload)
+ else:
+ payload = "None"
+ return HttpResponse(
+ template.render(
+ {
+ "request": request,
+ "url": request.get_full_path,
+ "docs_url": docs_url,
+ "payload": payload,
+ "source": source,
+ "show_button": show_button,
+ }
+ )
+ )
diff --git a/engine/apps/integrations/mixins/ratelimit_mixin.py b/engine/apps/integrations/mixins/ratelimit_mixin.py
new file mode 100644
index 0000000000..79e95b9b5f
--- /dev/null
+++ b/engine/apps/integrations/mixins/ratelimit_mixin.py
@@ -0,0 +1,229 @@
+import logging
+from abc import ABC, abstractmethod
+from functools import wraps
+
+from django.apps import apps
+from django.core.cache import cache
+from django.http import HttpRequest, HttpResponse
+from django.views import View
+from ratelimit import ALL
+from ratelimit.exceptions import Ratelimited
+from ratelimit.utils import is_ratelimited
+
+from apps.integrations.tasks import start_notify_about_integration_ratelimit
+
+logger = logging.getLogger(__name__)
+
+
+RATELIMIT_INTEGRATION = 300
+RATELIMIT_TEAM = 900
+RATELIMIT_REASON_INTEGRATION = "channel"
+RATELIMIT_REASON_TEAM = "team"
+
+
+def get_rate_limit_per_channel_key(_, request):
+ """
+ Rate limiting based on AlertReceiveChannel's PK
+ """
+ return str(request.alert_receive_channel.pk)
+
+
+def get_rate_limit_per_team_key(_, request):
+ """
+ Rate limiting based on AlertReceiveChannel's team PK
+ """
+ return str(request.alert_receive_channel.organization_id)
+
+
+def ratelimit(group=None, key=None, rate=None, method=ALL, block=False, reason=None):
+ """
+ This decorator is an updated version of:
+ from ratelimit.decorators import ratelimit
+
+ Because we need to store ratelimit reason.
+ """
+
+ def decorator(fn):
+ @wraps(fn)
+ def _wrapped(*args, **kw):
+ # Work as a CBV method decorator.
+ if isinstance(args[0], HttpRequest):
+ request = args[0]
+ else:
+ request = args[1]
+
+ request.limited = getattr(request, "limited", False)
+ was_limited_before = request.limited
+
+ ratelimited = is_ratelimited(
+ request=request, group=group, fn=fn, key=key, rate=rate, method=method, increment=True
+ )
+
+ # We need to know if it's the first ratelimited request for notification purposes.
+ request.is_first_rate_limited_request = getattr(request, "is_first_rate_limited_request", False)
+ request.ratelimit_reason = getattr(request, "ratelimit_reason", None)
+ request.ratelimit_reason_key = getattr(request, "ratelimit_reason_key", None)
+
+ # This decorator could be executed multiple times per request.
+ # Making sure we don't overwrite this flag.
+ if not request.is_first_rate_limited_request:
+ request.is_first_rate_limited_request = request.limited and not was_limited_before
+
+ # Saving reason only for the first ratelimit occurrence to avoid overwriting.
+ if request.is_first_rate_limited_request:
+ request.ratelimit_reason = reason
+ request.ratelimit_reason_key = None
+ if key is not None:
+ request.ratelimit_reason_key = key(None, request)
+
+ if ratelimited and block:
+ raise Ratelimited()
+ return fn(*args, **kw)
+
+ return _wrapped
+
+ return decorator
+
+
+def is_ratelimit_ignored(alert_receive_channel):
+ DynamicSetting = apps.get_model("base", "DynamicSetting")
+ integration_token_to_ignore_ratelimit = DynamicSetting.objects.get_or_create(
+ name="integration_tokens_to_ignore_ratelimit",
+ defaults={
+ "json_value": [
+ "dummytoken_uniq_1213kj1h3",
+ ]
+ },
+ )[0]
+ return alert_receive_channel.token in integration_token_to_ignore_ratelimit.json_value
+
+
+class RateLimitMixin(ABC, View):
+ def dispatch(self, *args, **kwargs):
+ if self.request.method in self.methods_to_limit:
+ self.execute_rate_limit_with_notification_logic()
+
+ if self.request.limited:
+ try:
+ if not is_ratelimit_ignored(self.request.alert_receive_channel):
+ return self.get_ratelimit_http_response()
+ else:
+ logger.info(f"Token {self.request.alert_receive_channel.token} saved from the ratelimit!")
+ except Exception as e:
+ logger.info(f"Exception in the ratelimit avoidance mechanism! {e}")
+ return self.get_ratelimit_http_response()
+
+ return super().dispatch(*args, **kwargs)
+
+ def get_ratelimit_http_response(self):
+ return HttpResponse(self.ratelimit_text, status=429)
+
+ @property
+ @abstractmethod
+ def ratelimit_text(self):
+ raise NotImplementedError
+
+ def execute_rate_limit_with_notification_logic(self, *args, **kwargs):
+ self.execute_rate_limit(self.request)
+ self.notify()
+
+ @property
+ @abstractmethod
+ def methods_to_limit(self):
+ raise NotImplementedError
+
+ @abstractmethod
+ def notify(self):
+ raise NotImplementedError
+
+ @abstractmethod
+ def execute_rate_limit(self, request):
+ raise NotImplementedError
+
+
+class IntegrationHeartBeatRateLimitMixin(RateLimitMixin, View):
+ TEXT_INTEGRATION_HEARTBEAT = """
+     We received too many heartbeats from this integration and had to apply rate limiting.
+     Please don't hesitate to reach out in case you need increased capacity.
+ """
+
+ def notify(self):
+ """
+        There is no need to notify about heartbeat limits for now
+ """
+ pass
+
+ @ratelimit(
+ key=get_rate_limit_per_channel_key,
+ rate=str(RATELIMIT_INTEGRATION) + "/5m",
+ group="integration",
+ reason=RATELIMIT_REASON_INTEGRATION,
+ )
+ @ratelimit(
+ key=get_rate_limit_per_team_key, rate=str(RATELIMIT_TEAM) + "/5m", group="team", reason=RATELIMIT_REASON_TEAM
+ )
+ def execute_rate_limit(self, *args, **kwargs):
+ pass
+
+ @property
+ def ratelimit_text(self):
+ return self.TEXT_INTEGRATION_HEARTBEAT
+
+ @property
+ def methods_to_limit(self):
+ return {"GET", "POST"}
+
+
+class IntegrationRateLimitMixin(RateLimitMixin, View):
+ TEXT_INTEGRATION = (
+ "Rate-limiting has been applied to your account "
+ "because too many alerts were sent from your {integration} integration. "
+ "Rate-limiting is activated so you will continue to receive alerts from other integrations. "
+ "Read more about rate limits in our docs. "
+ "To increase your capacity, reach out to our support team."
+ )
+
+ TEXT_WORKSPACE = (
+ "Rate-limiting has been applied to your account "
+ "because too many alerts were sent from multiple integrations. "
+ "Read more about rate limits in our docs. "
+ "To increase your capacity, reach out to our support team."
+ )
+
+ @ratelimit(
+ key=get_rate_limit_per_channel_key,
+ rate=str(RATELIMIT_INTEGRATION) + "/5m",
+ group="integration",
+ reason=RATELIMIT_REASON_INTEGRATION,
+ )
+ @ratelimit(
+ key=get_rate_limit_per_team_key, rate=str(RATELIMIT_TEAM) + "/5m", group="team", reason=RATELIMIT_REASON_TEAM
+ )
+ def execute_rate_limit(self, *args, **kwargs):
+ pass
+
+ def notify(self):
+ if self.request.limited and self.request.is_first_rate_limited_request:
+ team_id = self.request.alert_receive_channel.organization_id
+
+ # TODO: post to the other destinations too.
+
+ cache_key = "rate_limit_notification_sent_team_" + str(team_id)
+
+ if cache.get(cache_key) is None:
+ start_notify_about_integration_ratelimit.apply_async((team_id, self.ratelimit_text), expires=60 * 5)
+ cache.set(cache_key, True, 60 * 15)
+                logger.debug(f"Setting rate limit notification no-spam key: {cache_key}")
+
+ @property
+ def ratelimit_text(self):
+ if self.request.ratelimit_reason == RATELIMIT_REASON_INTEGRATION:
+ return self.TEXT_INTEGRATION.format(
+ integration=self.request.alert_receive_channel.verbal_name,
+ )
+ else:
+ return self.TEXT_WORKSPACE
+
+ @property
+ def methods_to_limit(self):
+ return {"POST"}
diff --git a/engine/apps/integrations/tasks.py b/engine/apps/integrations/tasks.py
new file mode 100644
index 0000000000..2897b1e423
--- /dev/null
+++ b/engine/apps/integrations/tasks.py
@@ -0,0 +1,166 @@
+import logging
+import random
+
+from celery import shared_task
+from celery.utils.log import get_task_logger
+from django.apps import apps
+from django.conf import settings
+from django.core.cache import cache
+
+from apps.alerts.models.alert_group_counter import ConcurrentUpdateError
+from apps.alerts.tasks import resolve_alert_group_by_source_if_needed
+from apps.slack.scenarios.scenario_step import SlackAPIException, SlackClientWithErrorHandling
+from common.custom_celery_tasks import shared_dedicated_queue_retry_task
+from common.custom_celery_tasks.create_alert_base_task import CreateAlertBaseTask
+
+logger = get_task_logger(__name__)
+logger.setLevel(logging.DEBUG)
+
+
+@shared_task(
+ base=CreateAlertBaseTask,
+ autoretry_for=(Exception,),
+ retry_backoff=True,
+ max_retries=1 if settings.DEBUG else None,
+)
+def create_alertmanager_alerts(alert_receive_channel_pk, alert, is_demo=False, force_route_id=None):
+ AlertReceiveChannel = apps.get_model("alerts", "AlertReceiveChannel")
+ Alert = apps.get_model("alerts", "Alert")
+
+ alert_receive_channel = AlertReceiveChannel.objects_with_deleted.get(pk=alert_receive_channel_pk)
+ if (
+ alert_receive_channel.deleted_at is not None
+ or alert_receive_channel.integration == AlertReceiveChannel.INTEGRATION_MAINTENANCE
+ ):
+ logger.info(f"AlertReceiveChannel alert ignored if deleted/maintenance")
+ return
+
+ try:
+ alert = Alert.create(
+ title=None,
+ message=None,
+ image_url=None,
+ link_to_upstream_details=None,
+ alert_receive_channel=alert_receive_channel,
+ integration_unique_data=None,
+ raw_request_data=alert,
+ enable_autoresolve=False,
+ is_demo=is_demo,
+ force_route_id=force_route_id,
+ )
+ except ConcurrentUpdateError:
+ # This error is raised when there are concurrent updates on AlertGroupCounter due to optimistic lock on it.
+ # The idea is to not block the worker with a database lock and retry the task in case of concurrent updates.
+ countdown = random.randint(1, 10)
+ create_alertmanager_alerts.apply_async((alert_receive_channel_pk, alert), countdown=countdown)
+ logger.warning(f"Retrying the task gracefully in {countdown} seconds due to ConcurrentUpdateError")
+ return
+
+ if alert_receive_channel.allow_source_based_resolving:
+ task = resolve_alert_group_by_source_if_needed.apply_async((alert.group.pk,), countdown=5)
+ alert.group.active_resolve_calculation_id = task.id
+ alert.group.save(update_fields=["active_resolve_calculation_id"])
+
+ logger.info(f"Created alert {alert.pk} for alert group {alert.group.pk}")
+
+
+@shared_task(
+ base=CreateAlertBaseTask,
+ autoretry_for=(Exception,),
+ retry_backoff=True,
+ max_retries=1 if settings.DEBUG else None,
+)
+def create_alert(
+ title,
+ message,
+ image_url,
+ link_to_upstream_details,
+ alert_receive_channel_pk,
+ integration_unique_data,
+ raw_request_data,
+ is_demo=False,
+ force_route_id=None,
+):
+ AlertReceiveChannel = apps.get_model("alerts", "AlertReceiveChannel")
+ Alert = apps.get_model("alerts", "Alert")
+
+ try:
+ alert_receive_channel = AlertReceiveChannel.objects.get(pk=alert_receive_channel_pk)
+ except AlertReceiveChannel.DoesNotExist:
+ return
+
+ if image_url is not None:
+ image_url = str(image_url)[:299]
+
+ try:
+ alert = Alert.create(
+ title=title,
+ message=message,
+ image_url=image_url,
+ link_to_upstream_details=link_to_upstream_details,
+ alert_receive_channel=alert_receive_channel,
+ integration_unique_data=integration_unique_data,
+ raw_request_data=raw_request_data,
+ force_route_id=force_route_id,
+ is_demo=is_demo,
+ )
+ logger.info(f"Created alert {alert.pk} for alert group {alert.group.pk}")
+ except ConcurrentUpdateError:
+ # This error is raised when there are concurrent updates on AlertGroupCounter due to optimistic lock on it.
+ # The idea is to not block the worker with a database lock and retry the task in case of concurrent updates.
+ countdown = random.randint(1, 10)
+ create_alert.apply_async(
+ (
+ title,
+ message,
+ image_url,
+ link_to_upstream_details,
+ alert_receive_channel_pk,
+ integration_unique_data,
+ raw_request_data,
+ ),
+ countdown=countdown,
+ )
+ logger.warning(f"Retrying the task gracefully in {countdown} seconds due to ConcurrentUpdateError")
+
+
+@shared_dedicated_queue_retry_task()
+def start_notify_about_integration_ratelimit(team_id, text, **kwargs):
+ notify_about_integration_ratelimit_in_slack.apply_async(
+ args=(
+ team_id,
+ text,
+ ),
+ kwargs=kwargs,
+ expires=60 * 5,
+ )
+
+
+@shared_dedicated_queue_retry_task(
+ autoretry_for=(Exception,), retry_backoff=True, max_retries=1 if settings.DEBUG else 5
+)
+def notify_about_integration_ratelimit_in_slack(organization_id, text, **kwargs):
+ # TODO: Review ratelimits
+ Organization = apps.get_model("user_management", "Organization")
+
+ try:
+ organization = Organization.objects.get(pk=organization_id)
+ except Organization.DoesNotExist:
+ logger.warning(f"Organization {organization_id} does not exist")
+ return
+
+ cache_key = f"notify_about_integration_ratelimit_in_slack_{organization.pk}"
+ if cache.get(cache_key):
+ logger.debug(f"Message was sent recently for organization {organization_id}")
+ return
+ else:
+ cache.set(cache_key, True, 60 * 15) # Set cache before sending message to make sure we don't ratelimit slack
+ slack_team_identity = organization.slack_team_identity
+ if slack_team_identity is not None:
+ try:
+ sc = SlackClientWithErrorHandling(slack_team_identity.bot_access_token)
+ sc.api_call(
+ "chat.postMessage", channel=organization.general_log_channel_id, text=text, team=slack_team_identity
+ )
+ except SlackAPIException as e:
+ logger.warning(f"Slack exception {e} while sending message for organization {organization_id}")
diff --git a/engine/apps/integrations/templates/heartbeat_instructions/alertmanager.html b/engine/apps/integrations/templates/heartbeat_instructions/alertmanager.html
new file mode 100644
index 0000000000..3a7ce38c9b
--- /dev/null
+++ b/engine/apps/integrations/templates/heartbeat_instructions/alertmanager.html
@@ -0,0 +1,41 @@
+This configuration sends an alert once a minute; if Alertmanager stops working, Amixr will detect the missing
+    heartbeat and notify you about it.
+
+    Add the alert-generating rule to your Prometheus rules file.
+    Within Prometheus it is trivial to create an expression from which to build a heartbeat with Amixr:
+    expr: vector(1)
+    That expression always returns true. Here is an alert that leverages it to create a
+    heartbeat alert:
+
+ groups:
+ - name: meta
+ rules:
+ - alert: heartbeat
+ expr: vector(1)
+ labels:
+ severity: none
+ annotations:
+ description: This is heartbeat alert
+ summary: Alerting Amixr
+
+
+    Add the receiver configuration to your Alertmanager configuration file with the unique URL from Amixr
+
+ ...
+ route:
+ ...
+ routes:
+ - match:
+ alertname: heartbeat
+ receiver: 'amixr-heartbeat'
+ group_wait: 0s
+ group_interval: 1m
+ repeat_interval: 50s
+ receivers:
+ - name: 'amixr-heartbeat'
+ webhook_configs:
+ - url: {{ heartbeat_url }}
+ send_resolved: false
+
+
+
\ No newline at end of file
diff --git a/engine/apps/integrations/templates/heartbeat_instructions/elastalert.html b/engine/apps/integrations/templates/heartbeat_instructions/elastalert.html
new file mode 100644
index 0000000000..18c2bbf9c2
--- /dev/null
+++ b/engine/apps/integrations/templates/heartbeat_instructions/elastalert.html
@@ -0,0 +1,11 @@
+Add the following rule to ElastAlert
+
+ index: elastalert_status
+ type: any
+ alert: post
+ http_post_url: {{ heartbeat_url }}
+ realert:
+ minutes: 1
+ alert_text: elastalert is still running
+ alert_text_type: alert_text_only
+
\ No newline at end of file
diff --git a/engine/apps/integrations/templates/heartbeat_instructions/formatted_webhook.html b/engine/apps/integrations/templates/heartbeat_instructions/formatted_webhook.html
new file mode 100644
index 0000000000..4ca9faadf2
--- /dev/null
+++ b/engine/apps/integrations/templates/heartbeat_instructions/formatted_webhook.html
@@ -0,0 +1,16 @@
+
+
+    On the command line, execute the following commands:
+
+ echo 'curl -s {{ heartbeat_url }} > /dev/null' > heartbeat_script.sh
+ chmod +x heartbeat_script.sh
+ crontab -e
+
+
+
+ Add this line to crontab:
+
+ * * * * * /path/to/your/heartbeat_script.sh
+
+
+
\ No newline at end of file
diff --git a/engine/apps/integrations/templates/heartbeat_instructions/grafana.html b/engine/apps/integrations/templates/heartbeat_instructions/grafana.html
new file mode 100644
index 0000000000..5b895bc149
--- /dev/null
+++ b/engine/apps/integrations/templates/heartbeat_instructions/grafana.html
@@ -0,0 +1,37 @@
+{% load static %}
+
+
+    1. In Alerting > Notification channels, click Add channel.
+
+
+
+
+
+    2. Enter a Name, set the Type to webhook, and enter the unique OnCall URL.
+ 3. Select Send reminders and set a 5m (minute) interval.
+
+
+
+
+
+ 4. In a dashboard, create a panel that will generate heartbeat alerts.
+ 5. In the Metrics tab, enter 0 in the query field.
+
+
+
+    6. In the Alerting tab, enter an alert name and conditions that keep the alert always firing. Set the alert interval.
+
+
+
+    7. In Notifications, select the name of your heartbeat channel.
+
+
+
+    8. Click Save.
+
+
\ No newline at end of file
diff --git a/engine/apps/integrations/templates/heartbeat_instructions/prtg.html b/engine/apps/integrations/templates/heartbeat_instructions/prtg.html
new file mode 100644
index 0000000000..4b451fb271
--- /dev/null
+++ b/engine/apps/integrations/templates/heartbeat_instructions/prtg.html
@@ -0,0 +1,3 @@
+Set up PRTG to send heartbeat GET or POST requests to the Amixr endpoint to detect an outage of your
+    monitoring system. You can write a script that does this and then run it via an EXE/Script sensor.
+    If you add this sensor to the Core Server, Amixr will send an alert if the Core Server goes offline.
\ No newline at end of file
diff --git a/engine/apps/integrations/templates/heartbeat_instructions/webhook.html b/engine/apps/integrations/templates/heartbeat_instructions/webhook.html
new file mode 100644
index 0000000000..4ca9faadf2
--- /dev/null
+++ b/engine/apps/integrations/templates/heartbeat_instructions/webhook.html
@@ -0,0 +1,16 @@
+
+
+    On the command line, execute the following commands:
+
+ echo 'curl -s {{ heartbeat_url }} > /dev/null' > heartbeat_script.sh
+ chmod +x heartbeat_script.sh
+ crontab -e
+
+
+
+ Add this line to crontab:
+
+ * * * * * /path/to/your/heartbeat_script.sh
+
+
+
\ No newline at end of file
diff --git a/engine/apps/integrations/templates/heartbeat_instructions/zabbix.html b/engine/apps/integrations/templates/heartbeat_instructions/zabbix.html
new file mode 100644
index 0000000000..a7490b59d6
--- /dev/null
+++ b/engine/apps/integrations/templates/heartbeat_instructions/zabbix.html
@@ -0,0 +1,33 @@
+{% load static %}
+
+ Open your Zabbix interface
+    We’ll use a Zabbix Simple Check to send periodic requests to a special Amixr heartbeat endpoint.
+    First, create a host: go to Configuration -> Hosts -> Create host
+
+
+
+    Fill in the Host name, Groups, and DNS name (app.amixr.io), select DNS, and click Save
+
+
+
+ Click Web and create a new Web Scenario
+
+
+
+    Fill in the name and interval (10m), then go to the Steps section
+
+
+
+    In the Steps section, add a Name and the unique URL from Amixr
+
+
+
diff --git a/engine/apps/integrations/templates/heartbeat_link.html b/engine/apps/integrations/templates/heartbeat_link.html
new file mode 100644
index 0000000000..31c6a34118
--- /dev/null
+++ b/engine/apps/integrations/templates/heartbeat_link.html
@@ -0,0 +1,31 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/engine/apps/integrations/templates/html/integration_alertmanager.html b/engine/apps/integrations/templates/html/integration_alertmanager.html
new file mode 100644
index 0000000000..df7d44b0c9
--- /dev/null
+++ b/engine/apps/integrations/templates/html/integration_alertmanager.html
@@ -0,0 +1,33 @@
+How to start sending alerts to Grafana OnCall from AlertManager
+
+
+ 1. Add the new receiver to the AlertManager configuration file, for example:
+
+ ...
+ route:
+ receiver: 'grafana_oncall'
+ group_by: [alertname, datacenter, app]
+ ...
+ receivers:
+ - name: 'grafana_oncall'
+ webhook_configs:
+ - url: {{ alert_receive_channel.integration_url }}
+ send_resolved: true
+
+
+
+
+Next steps:
+
+
+ 1. Add the routes and escalations in Escalations settings
+
+
+ 2. Check grouping, auto-resolving, and rendering templates in
+    Alert Templates Settings
+
+
+    3. Make sure all the users set up their Personal Notifications Settings
+    on the Users Page
+
+
diff --git a/engine/apps/integrations/templates/html/integration_amazon_sns.html b/engine/apps/integrations/templates/html/integration_amazon_sns.html
new file mode 100644
index 0000000000..df5f2540f0
--- /dev/null
+++ b/engine/apps/integrations/templates/html/integration_amazon_sns.html
@@ -0,0 +1,28 @@
+How to start sending alerts to Grafana OnCall from Amazon SNS
+
+
+ 1. Create a new Topic in
+ https://console.aws.amazon.com/sns
+
+ 2. Open this topic, then create a new subscription
+ 3. Choose the protocol HTTPS
+
+ 4. Add the following webhook URL to the Amazon SNS Endpoint
+ {{ alert_receive_channel.integration_url }}
+
+
+
+Next steps:
+
+
+ 1. Add the routes and escalations in Escalations settings
+
+
+ 2. Check grouping, auto-resolving, and rendering templates in
+    Alert Templates Settings
+
+
+    3. Make sure all the users set up their Personal Notifications Settings
+    on the Users Page
+
+
\ No newline at end of file
diff --git a/engine/apps/integrations/templates/html/integration_curler.html b/engine/apps/integrations/templates/html/integration_curler.html
new file mode 100644
index 0000000000..afbccbcc68
--- /dev/null
+++ b/engine/apps/integrations/templates/html/integration_curler.html
@@ -0,0 +1,11 @@
+More details in our documentation
+
+
+ Go to https://curler.amixr.io
+ Set your website URL and E-Mail
+ Click "Edit"
+ Set "Amixr Webhook":
+ {{ alert_receive_channel.integration_url }}
+
+ Click "Save"
+
\ No newline at end of file
diff --git a/engine/apps/integrations/templates/html/integration_datadog.html b/engine/apps/integrations/templates/html/integration_datadog.html
new file mode 100644
index 0000000000..abc66addd7
--- /dev/null
+++ b/engine/apps/integrations/templates/html/integration_datadog.html
@@ -0,0 +1,33 @@
+How to start sending alerts to Grafana OnCall from Datadog
+
+
+ 1. Navigate to the Integrations page from the sidebar
+
+ 2. Search for webhook in the search bar
+ 3. Enter a name for the integration, for example: grafana-oncall-alerts
+
+ 4. Paste the webhook URL, then save
+ {{ alert_receive_channel.integration_url }}
+
+
+ 5. Navigate to the Events page from the sidebar to send the test alert
+
+ 6. Type @webhook-grafana-oncall-alerts test alert
+ 7. Click the post button
+
+
+
+Next steps:
+
+
+ 1. Add the routes and escalations in Escalations settings
+
+
+ 2. Check grouping, auto-resolving, and rendering templates in
+    Alert Templates Settings
+
+
+    3. Make sure all the users set up their Personal Notifications Settings
+    on the Users Page
+
+
\ No newline at end of file
diff --git a/engine/apps/integrations/templates/html/integration_demo.html b/engine/apps/integrations/templates/html/integration_demo.html
new file mode 100644
index 0000000000..6678d66e5f
--- /dev/null
+++ b/engine/apps/integrations/templates/html/integration_demo.html
@@ -0,0 +1 @@
+This is the demo integration for Slack, so no action is required.
diff --git a/engine/apps/integrations/templates/html/integration_elastalert.html b/engine/apps/integrations/templates/html/integration_elastalert.html
new file mode 100644
index 0000000000..a200d43f58
--- /dev/null
+++ b/engine/apps/integrations/templates/html/integration_elastalert.html
@@ -0,0 +1,22 @@
+How to start sending alerts to Grafana OnCall from ElastAlert
+
+
+ 1. Use the integration URL from above as the ElastAlert webhook
+ {{ alert_receive_channel.integration_url }}
+
+
+
+Next steps:
+
+
+ 1. Add the routes and escalations in Escalations settings
+
+
+ 2. Check grouping, auto-resolving, and rendering templates in
+    Alert Templates Settings
+
+
+    3. Make sure all the users set up their Personal Notifications Settings
+    on the Users Page
+
+
\ No newline at end of file
diff --git a/engine/apps/integrations/templates/html/integration_fabric.html b/engine/apps/integrations/templates/html/integration_fabric.html
new file mode 100644
index 0000000000..a4f8486d48
--- /dev/null
+++ b/engine/apps/integrations/templates/html/integration_fabric.html
@@ -0,0 +1,26 @@
+How to start sending alerts to Grafana OnCall from Fabric
+
+ 1. Go to https://www.fabric.io/settings/apps
+ 2. Choose your application
+ 3. Navigate to Service Hooks -> WebHook
+ 4. Enter URL:
+ {{ alert_receive_channel.integration_url }}
+
+ 5. Click Verify
+ 6. Choose "SEND IMPACT CHANGE ALERTS" and "ALSO SEND NON-FATAL ALERTS"
+
+
+Next steps:
+
+
+ 1. Add the routes and escalations in Escalations settings
+
+
+ 2. Check grouping, auto-resolving, and rendering templates in
+    Alert Templates Settings
+
+
+    3. Make sure all the users set up their Personal Notifications Settings
+    on the Users Page
+
+
diff --git a/engine/apps/integrations/templates/html/integration_formatted_webhook.html b/engine/apps/integrations/templates/html/integration_formatted_webhook.html
new file mode 100644
index 0000000000..2f1ea3205f
--- /dev/null
+++ b/engine/apps/integrations/templates/html/integration_formatted_webhook.html
@@ -0,0 +1,47 @@
+
+ Formatted Webhook is primarily used for custom integrations using scripts.
+    Use any HTTP client, for example curl, to send POST requests with a body using the format in the example below:
+
+
+
+Body Fields Format:
+
+    alert_uid [char][not required] - unique alert ID for grouping;
+    title [char][not required] - title;
+    image_url [char][not required] - URL for an image attached to the alert;
+    state [char][not required] - either "ok" or "alerting", helpful for auto-resolving;
+    link_to_upstream_details [char][not required] - link back to your monitoring system;
+    message [char][not required] - alert details;
+
+
+
+Request example:
+
+
+curl -X POST \
+ {{ alert_receive_channel.integration_url }} \
+    -H 'Content-Type: application/json' \
+ -d '{
+ "alert_uid": "08d6891a-835c-e661-39fa-96b6a9e26552",
+ "title": "The whole system is down",
+ "image_url": "https://upload.wikimedia.org/wikipedia/commons/e/ee/Grumpy_Cat_by_Gage_Skidmore.jpg",
+ "state": "alerting",
+ "link_to_upstream_details": "https://en.wikipedia.org/wiki/Downtime",
+ "message": "Smth happened. Oh no!"
+}'
+
+
+
+Next steps:
+
+
+ 1. Add the routes and escalations in Escalations settings
+
+
+ 2. Check grouping, auto-resolving, and rendering templates in
+    Alert Templates Settings
+
+
+    3. Make sure all the users set up their Personal Notifications Settings
+    on the Users Page
+
+
\ No newline at end of file
diff --git a/engine/apps/integrations/templates/html/integration_grafana.html b/engine/apps/integrations/templates/html/integration_grafana.html
new file mode 100644
index 0000000000..84de1a3b2d
--- /dev/null
+++ b/engine/apps/integrations/templates/html/integration_grafana.html
@@ -0,0 +1,41 @@
+How to start sending alerts to Grafana OnCall from Other Grafana
+
+    To connect the current Grafana stack's alerting automatically, please use the Current Grafana integration.
+
+
+
+ 1. Open your other Grafana instance
+
+
+ 2. Open Alerting Configuration and go to Contact Points
+
+
+    3. Create a new Contact Point with type webhook and URL
+ {{ alert_receive_channel.integration_url }}
+
+
+ 4. Open Notification Policies and create a New Specific Policy
+
+
+ 5. Choose any Matching labels, or leave them empty to route all the alerts to Grafana OnCall
+
+
+ 6. Choose the Contact point created in step 3
+
+
+
+
+Next steps:
+
+
+ 1. Add the routes and escalations in Escalations settings
+
+
+ 2. Check grouping, auto-resolving, and rendering templates in
+    Alert Templates Settings
+
+
+    3. Make sure all the users set up their Personal Notifications Settings
+    on the Users Page
+
+
diff --git a/engine/apps/integrations/templates/html/integration_grafana_alerting.html b/engine/apps/integrations/templates/html/integration_grafana_alerting.html
new file mode 100644
index 0000000000..d54ca521a8
--- /dev/null
+++ b/engine/apps/integrations/templates/html/integration_grafana_alerting.html
@@ -0,0 +1,62 @@
+Congratulations, you've connected Grafana Alerting and Grafana OnCall!
+
+    This is the integration with the current Grafana Alerting.
+    It has already automatically created a new Grafana Alerting Contact Point and
+    a Specific Route.
+    If you want to connect another Grafana instance, please
+    choose the Other Grafana Integration instead.
+
+
+How to send the Test alert from Grafana Alerting?
+
+
+
+ 1. Open the corresponding Grafana Alerting Contact Point
+
+
+    2. Use the Test button to send an alert to Grafana OnCall
+
+
+
+
+How to choose what alerts to send from Grafana Alerting to Grafana OnCall?
+
+
+
+ 1. Open the corresponding Grafana Alerting Specific Route
+
+
+    2. All alerts are sent from Grafana Alerting to Grafana OnCall by default;
+    specify Matching Labels to select which alerts to send
+
+
+
+
+What if the Grafana Alerting Contact Point is missing?
+
+
+
+    1. Maybe it was deleted; you can always re-create it manually
+
+
+    2. Use the following webhook URL to create a webhook
+    Contact Point in Grafana Alerting
+ {{ alert_receive_channel.integration_url }}
+
+
+
+
+Next steps:
+
+
+ 1. Add the routes and escalations in Escalations settings
+
+
+ 2. Check grouping, auto-resolving, and rendering templates in
+    Alert Templates Settings
+
+
+    3. Make sure all the users set up their Personal Notifications Settings
+    on the Users Page
+
+
diff --git a/engine/apps/integrations/templates/html/integration_heartbeat.html b/engine/apps/integrations/templates/html/integration_heartbeat.html
new file mode 100644
index 0000000000..d81e79629a
--- /dev/null
+++ b/engine/apps/integrations/templates/html/integration_heartbeat.html
@@ -0,0 +1,4 @@
+Amixr-based Heartbeat Monitoring allows you to monitor long-lasting processes in your infrastructure or backend and ensure they finish within the expected timeframe.
+Your unique HeartBeat API link:
+{{ alert_receive_channel.integration_url }}
+More details and API specification in our documentation
\ No newline at end of file
diff --git a/engine/apps/integrations/templates/html/integration_inbound_email.html b/engine/apps/integrations/templates/html/integration_inbound_email.html
new file mode 100644
index 0000000000..a9016c19db
--- /dev/null
+++ b/engine/apps/integrations/templates/html/integration_inbound_email.html
@@ -0,0 +1,15 @@
+This integration consumes emails sent to a dedicated email address and creates incidents from them.
+
+It’s useful for:
+
+ Service desk.
+ Consuming alerts from other systems using emails as a message bus.
+
+Dedicated email address for incidents:
+{{ alert_receive_channel.inbound_email }}
+
+Fields:
+
+ email_subject - alert title;
+ email_body - alert details;
+
diff --git a/engine/apps/integrations/templates/html/integration_kapacitor.html b/engine/apps/integrations/templates/html/integration_kapacitor.html
new file mode 100644
index 0000000000..dc816bb937
--- /dev/null
+++ b/engine/apps/integrations/templates/html/integration_kapacitor.html
@@ -0,0 +1,22 @@
+How to start sending alerts to Grafana OnCall from Kapacitor
+
+
+    1. Use the integration URL above as the Kapacitor webhook
+ {{ alert_receive_channel.integration_url }}
+
+
+
+Next steps:
+
+
+ 1. Add the routes and escalations in Escalations settings
+
+
+ 2. Check grouping, auto-resolving, and rendering templates in
+    Alert Templates Settings
+
+
+    3. Make sure all the users set up their Personal Notifications Settings
+    on the Users Page
+
+
\ No newline at end of file
diff --git a/engine/apps/integrations/templates/html/integration_manual.html b/engine/apps/integrations/templates/html/integration_manual.html
new file mode 100644
index 0000000000..9c0d6eccb9
--- /dev/null
+++ b/engine/apps/integrations/templates/html/integration_manual.html
@@ -0,0 +1,7 @@
+
+There are 2 ways to issue an incident manually from Slack:
+
+
+    Send /oncall or /oncall Title message to any chat
+    Select a message, open its context menu, and click Create a new incident
+
diff --git a/engine/apps/integrations/templates/html/integration_newrelic.html b/engine/apps/integrations/templates/html/integration_newrelic.html
new file mode 100644
index 0000000000..932a2a378e
--- /dev/null
+++ b/engine/apps/integrations/templates/html/integration_newrelic.html
@@ -0,0 +1,27 @@
+How to start sending alerts to Grafana OnCall from NewRelic
+
+
+ 1. Go to "Alerts".
+ 2. Go to "Notification Channels".
+ 3. Create "Webhook" notification channel.
+
+ 4. Set the following URL:
+ {{ alert_receive_channel.integration_url }}
+
+ 5. Check "Payload type" is JSON.
+
+
+Next steps:
+
+
+ 1. Add the routes and escalations in Escalations settings
+
+
+ 2. Check grouping, auto-resolving, and rendering templates in
+    Alert Templates Settings
+
+
+    3. Make sure all the users set up their Personal Notifications Settings
+    on the Users Page
+
+
\ No newline at end of file
diff --git a/engine/apps/integrations/templates/html/integration_pagerduty.html b/engine/apps/integrations/templates/html/integration_pagerduty.html
new file mode 100644
index 0000000000..c57f47de8b
--- /dev/null
+++ b/engine/apps/integrations/templates/html/integration_pagerduty.html
@@ -0,0 +1,22 @@
+How to start sending alerts to Grafana OnCall from PagerDuty
+
+
+    1. Use the integration URL above as the PagerDuty webhook
+ {{ alert_receive_channel.integration_url }}
+
+
+
+Next steps:
+
+
+ 1. Add the routes and escalations in Escalations settings
+
+
+ 2. Check grouping, auto-resolving, and rendering templates in
+    Alert Templates Settings
+
+
+    3. Make sure all the users set up their Personal Notifications Settings
+    on the Users Page
+
+
\ No newline at end of file
diff --git a/engine/apps/integrations/templates/html/integration_pingdom.html b/engine/apps/integrations/templates/html/integration_pingdom.html
new file mode 100644
index 0000000000..4fd328ffbb
--- /dev/null
+++ b/engine/apps/integrations/templates/html/integration_pingdom.html
@@ -0,0 +1,27 @@
+How to start sending alerts to Grafana OnCall from Pingdom
+
+
+ 1. Go to https://my.pingdom.com/integrations/settings
+ 2. Click "Add Integration".
+ 3. Type: Webhook. Name: Amixr. URL:
+ {{ alert_receive_channel.integration_url }}
+ 4. Go to "Reports" -> "Uptime" -> "Edit Check".
+    5. Select the Amixr integration at the bottom.
+ 6. Click "Modify Check" to save.
+
+
+
+Next steps:
+
+
+ 1. Add the routes and escalations in Escalations settings
+
+
+ 2. Check grouping, auto-resolving, and rendering templates in
+    Alert Templates Settings
+
+
+    3. Make sure all the users set up their Personal Notifications Settings
+    on the Users Page
+
+
\ No newline at end of file
diff --git a/engine/apps/integrations/templates/html/integration_prtg.html b/engine/apps/integrations/templates/html/integration_prtg.html
new file mode 100644
index 0000000000..8693c2911b
--- /dev/null
+++ b/engine/apps/integrations/templates/html/integration_prtg.html
@@ -0,0 +1,98 @@
+
+    PRTG can use a script to send alerts to Grafana OnCall. Please use the format below.
+
+
+
+Body Fields Format:
+
+    alert_uid [char][not required] - unique alert ID for grouping;
+    title [char][not required] - title;
+    image_url [char][not required] - URL for an image attached to the alert;
+    state [char][not required] - either "ok" or "alerting", helpful for auto-resolving;
+    link_to_upstream_details [char][not required] - link back to your monitoring system;
+    message [char][not required] - alert details;
+
+
+
+ps1 script example:
+
+
+# You are very welcome to change this script to fit your needs and formats
+Param(
+ [string]$sensorid,
+ [string]$date,
+ [string]$device,
+ [string]$shortname,
+ [string]$status,
+ [string]$message,
+ [string]$datetime,
+ [string]$linksensor,
+ [string]$url
+)
+
+# PRTG Server
+$PRTGServer = "localhost:8080"
+$PRTGUsername = "amixr"
+$PRTGPasshash = "*****"  # placeholder passhash
+
+#Directory for logging
+$LogDirectory = "C:\temp\prtg-notifications-msteam.log"
+
+#Acknowledgement message for alerts acknowledged via Amixr
+$ackmessage = "Problem has been acknowledged via Amixr."
+
+# the acknowledgement URL
+$ackURL = [string]::Format("{0}/api/acknowledgealarm.htm?id={1}&ackmsg={2}&username={3}&passhash={4}",$PRTGServer,$sensorID,$ackmessage,$PRTGUsername,$PRTGPasshash);
+
+# Autoresolve an alert in Amixr
+if($status -eq "Up")
+{ $state = "ok" }
+ElseIf($status -match "now: Up")
+{ $state = "ok" }
+ElseIf($status -match "Up (was:")
+{ $state = "ok" }
+Else
+{ $state = "alerting" }
+
+$image_datetime = [datetime]::parse($datetime)
+$sdate = $image_datetime.AddHours(-1).ToString("yyyy-MM-dd-HH-mm-ss")
+$edate = $image_datetime.ToString("yyyy-MM-dd-HH-mm-ss")
+
+$image_url = "$PRTGServer/chart.png?type=graph&graphid=-1&avg=0&width=1000&height=400&username=$PRTGUsername&passhash=$PRTGPasshash&id=$sensorid&sdate=$sdate&edate=$edate"
+
+$Body = @{
+ "alert_uid"="$sensorid $date";
+ "title"="$device $shortname $status at $datetime ";
+ "image_url"=$image_url;
+ "state"=$state;
+ "link_to_upstream_details"="$linksensor";
+ "message"="$message";
+ "ack_url_get"="$ackURL"
+} | ConvertTo-Json
+$Body
+
+try
+{ Invoke-RestMethod -uri $url -Method Post -body $Body -ContentType 'application/json; charset=utf-8'; exit 0; }
+Catch
+{
+ $ErrorMessage = $_.Exception.Message
+ (Get-Date).ToString() +" - "+ $ErrorMessage | Out-File -FilePath $LogDirectory -Append
+ exit 2;
+}
+
+
+
+
+Next steps:
+
+
+ 1. Add the routes and escalations in Escalations settings
+
+
+ 2. Check grouping, auto-resolving, and rendering templates in
+    Alert Templates Settings
+
+
+    3. Make sure all the users set up their Personal Notifications Settings
+    on the Users Page
+
+
\ No newline at end of file
diff --git a/engine/apps/integrations/templates/html/integration_sentry.html b/engine/apps/integrations/templates/html/integration_sentry.html
new file mode 100644
index 0000000000..6181225e42
--- /dev/null
+++ b/engine/apps/integrations/templates/html/integration_sentry.html
@@ -0,0 +1,22 @@
+How to start sending alerts to Grafana OnCall from Sentry
+
+
+    1. Use the integration URL above as the Sentry webhook
+ {{ alert_receive_channel.integration_url }}
+
+
+
+Next steps:
+
+
+ 1. Add the routes and escalations in Escalations settings
+
+
+ 2. Check grouping, auto-resolving, and rendering templates in
+    Alert Templates Settings
+
+
+    3. Make sure all the users set up their Personal Notifications Settings
+    on the Users Page
+
+
\ No newline at end of file
diff --git a/engine/apps/integrations/templates/html/integration_sentry_platform.html b/engine/apps/integrations/templates/html/integration_sentry_platform.html
new file mode 100644
index 0000000000..ec72ee5ea1
--- /dev/null
+++ b/engine/apps/integrations/templates/html/integration_sentry_platform.html
@@ -0,0 +1,6 @@
+More details in our documentation
+
+Configuring Amixr and Sentry is as simple as this:
+Go to the Sentry website
+    Click the Accept & Install button.
+
diff --git a/engine/apps/integrations/templates/html/integration_slack_channel.html b/engine/apps/integrations/templates/html/integration_slack_channel.html
new file mode 100644
index 0000000000..83e6671768
--- /dev/null
+++ b/engine/apps/integrations/templates/html/integration_slack_channel.html
@@ -0,0 +1,5 @@
+This integration consumes messages from the channel you choose and creates incidents from them.
+
+It’s useful for:
+1. Service desk.
+2. Consuming alerts from other systems using Slack as a message bus.
diff --git a/engine/apps/integrations/templates/html/integration_stackdriver.html b/engine/apps/integrations/templates/html/integration_stackdriver.html
new file mode 100644
index 0000000000..560a24cd3c
--- /dev/null
+++ b/engine/apps/integrations/templates/html/integration_stackdriver.html
@@ -0,0 +1,26 @@
+How to start sending alerts to Grafana OnCall from Stackdriver
+
+
+ 1. Create a notification channel in Stackdriver by navigating to
+ Workspace Settings -> WEBHOOKS -> Add Webhook
+ {{ alert_receive_channel.integration_url }}
+    2. Create an alert in Stackdriver
+ by navigating to Alerting -> Policies -> Add Policy ->
+ Choose Notification Channel using the channel set up in step 1
+
+
+
+Next steps:
+
+
+ 1. Add the routes and escalations in Escalations settings
+
+
+ 2. Check grouping, auto-resolving, and rendering templates in
+    Alert Templates Settings
+
+
+    3. Make sure all the users set up their Personal Notifications Settings
+    on the Users Page
+
+
\ No newline at end of file
diff --git a/engine/apps/integrations/templates/html/integration_uptimerobot.html b/engine/apps/integrations/templates/html/integration_uptimerobot.html
new file mode 100644
index 0000000000..b7a5cb6b65
--- /dev/null
+++ b/engine/apps/integrations/templates/html/integration_uptimerobot.html
@@ -0,0 +1,51 @@
+How to start sending alerts to Grafana OnCall from UptimeRobot
+
+
+    Open https://uptimerobot.com and log in
+    Go to My Settings > Add Alert Contact and set the following fields:
+
+
+ Alert Contact Type: Webhook
+ Friendly Name: Amixr
+ URL to Notify: <{{ alert_receive_channel.integration_url }}>
+
+POST Value (JSON Format):
+
+
+ {
+ "monitorURL": "monitorURL",
+ "monitorFriendlyName": "monitorFriendlyName",
+ "alertType": "alertType",
+ "alertTypeFriendlyName": "alertTypeFriendlyName",
+ "alertDetails": "alertDetails",
+ "alertDuration": "alertDuration",
+ "sslExpiryDate": "sslExpiryDate",
+ "sslExpiryDaysLeft": "sslExpiryDaysLeft"
+ }
+
+ Flag Send as JSON
+Click Save Changes and Close
+
+Send Test Alert to Amixr
+Click Add New Monitor
+ Monitor Type HTTP(s)
+ Friendly Name Test Amixr
+    Set URL to http://devnull.amixr.io or any other non-existent domain
+ Click Checkbox next to Amixr Alert Contact (created in the previous step)
+ Click Create Monitor
+
+
+
+Next steps:
+
+
+ 1. Add the routes and escalations in Escalations settings
+
+
+ 2. Check grouping, auto-resolving, and rendering templates in
+    Alert Templates Settings
+
+
+    3. Make sure all the users set up their Personal Notifications Settings
+    on the Users Page
+
+
\ No newline at end of file
diff --git a/engine/apps/integrations/templates/html/integration_webhook.html b/engine/apps/integrations/templates/html/integration_webhook.html
new file mode 100644
index 0000000000..174fc21b8b
--- /dev/null
+++ b/engine/apps/integrations/templates/html/integration_webhook.html
@@ -0,0 +1,36 @@
+
+    Free-format Webhook is mostly used for custom integrations via scripts. Use any HTTP client,
+    e.g. curl, to send POST requests with any payload.
+
+
+Request example:
+
+
+curl -X POST \
+ {{ alert_receive_channel.integration_url }} \
+    -H 'Content-Type: application/json' \
+ -d '{
+ "alert_uid": "08d6891a-835c-e661-39fa-96b6a9e26552",
+ "title": "The whole system is down",
+ "image_url": "https://upload.wikimedia.org/wikipedia/commons/e/ee/Grumpy_Cat_by_Gage_Skidmore.jpg",
+ "state": "alerting",
+ "link_to_upstream_details": "https://en.wikipedia.org/wiki/Downtime",
+ "message": "Smth happened. Oh no!"
+}'
+
+
+
+Next steps:
+
+
+ 1. Add the routes and escalations in Escalations settings
+
+
+ 2. Check grouping, auto-resolving, and rendering templates in
+    Alert Templates Settings
+
+
+    3. Make sure all the users set up their Personal Notifications Settings
+    on the Users Page
+
+
\ No newline at end of file
diff --git a/engine/apps/integrations/templates/html/integration_zabbix.html b/engine/apps/integrations/templates/html/integration_zabbix.html
new file mode 100644
index 0000000000..3d84061f17
--- /dev/null
+++ b/engine/apps/integrations/templates/html/integration_zabbix.html
@@ -0,0 +1,72 @@
+
+    Zabbix can use a script to send alerts to Grafana OnCall. Please use the format below.
+
+
+
+Body Fields Format:
+
+    alert_uid [char][not required] - unique alert ID for grouping;
+    title [char][not required] - title;
+    image_url [char][not required] - URL for an image attached to the alert;
+    state [char][not required] - either "ok" or "alerting", helpful for auto-resolving;
+    link_to_upstream_details [char][not required] - link back to your monitoring system;
+    message [char][not required] - alert details;
+
+
+
+Script example:
+
+
+#!/bin/bash
+# This is the modification of original ericos's shell script.
+
+# Get the url ($1), subject ($2), and message ($3)
+url="$1"
+subject="${2//$'\r\n'/'\n'}"
+message="${3//$'\r\n'/'\n'}"
+
+# Alert state depending on the subject indicating whether it is a trigger going in to problem state or recovering
+recoversub='^RECOVER(Y|ED)?$|^OK$|^Resolved.*'
+
+if [[ "$subject" =~ $recoversub ]]; then
+ state='ok'
+else
+ state='alerting'
+fi
+
+payload='{
+ "title": "'${subject}'",
+ "state": "'${state}'",
+ "message": "'${message}'"
+}'
+
+# Alert group identifier from the subject of action. Grouping will not work without AMIXR_GROUP in the action subject
+regex='AMIXR_GROUP: ([a-zA-Z0-9_\"]*)'
+if [[ "$subject" =~ $regex ]]; then
+ alert_uid=${BASH_REMATCH[1]}
+ payload='{
+ "alert_uid": "'${alert_uid}'",
+ "title": "'${subject}'",
+ "state": "'${state}'",
+ "message": "'${message}'"
+ }'
+fi
+
+return=$(curl $url -d "${payload}" -H "Content-Type: application/json" -X POST)
+
+
+
+
+Next steps:
+
+
+ 1. Add the routes and escalations in Escalations settings
+
+
+ 2. Check grouping, auto-resolving, and rendering templates in
+ Alert Templates Settings
+
+
+ 3. Make sure all the users set up their Personal Notification Settings
+ on the Users Page
+
+
\ No newline at end of file
diff --git a/engine/apps/integrations/templates/integration_link.html b/engine/apps/integrations/templates/integration_link.html
new file mode 100644
index 0000000000..1f1a1a57ff
--- /dev/null
+++ b/engine/apps/integrations/templates/integration_link.html
@@ -0,0 +1,83 @@
+
+ This is a Grafana OnCall integration link.
+
+ This link is supposed to be used for pushing alerts from your monitoring system to Grafana OnCall. Share it with those responsible for configuring your monitoring tools; most likely they will know what to do.
+
+ {% if request.is_secure %}https{% else %}http{% endif %}://{{ request.META.HTTP_HOST }}{{ url }}
+
+
+ Copy to clipboard
+
+
+ {% if show_button %}
+
+
+ Simulate incident from {{ source }}
+
+
+ {% endif %}
+
+ Go to the Guide
+
\ No newline at end of file
diff --git a/engine/apps/integrations/tests/__init__.py b/engine/apps/integrations/tests/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/engine/apps/integrations/tests/test_heartbeat_metadata.py b/engine/apps/integrations/tests/test_heartbeat_metadata.py
new file mode 100644
index 0000000000..f2e62cf419
--- /dev/null
+++ b/engine/apps/integrations/tests/test_heartbeat_metadata.py
@@ -0,0 +1,18 @@
+from apps.integrations.metadata import heartbeat
+
+
+def test_heartbeat_metadata_presence():
+ necessary_attrs = [
+ "heartbeat_expired_title",
+ "heartbeat_expired_message",
+ "heartbeat_expired_payload",
+ "heartbeat_restored_title",
+ "heartbeat_restored_message",
+ "heartbeat_restored_payload",
+ "heartbeat_instruction_template",
+ ]
+ modules = [x for x in dir(heartbeat) if not x.startswith("_") and x != "apps"]
+ for m in modules:
+ m = getattr(heartbeat, m)
+ for attr in necessary_attrs:
+ assert getattr(m, attr) is not None
diff --git a/engine/apps/integrations/tests/test_ratelimit.py b/engine/apps/integrations/tests/test_ratelimit.py
new file mode 100644
index 0000000000..a713d1ab25
--- /dev/null
+++ b/engine/apps/integrations/tests/test_ratelimit.py
@@ -0,0 +1,99 @@
+from unittest import mock
+
+import pytest
+from django.core.cache import cache
+from django.test import Client
+from django.urls import reverse
+
+from apps.alerts.models import AlertReceiveChannel
+
+
+# Ratelimit keys are stored in cache. Clean it before and after every test to make them idempotent.
+def setup_module(module):
+ cache.clear()
+
+
+def teardown_module(module):
+ cache.clear()
+
+
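+# _split_rate is patched to return (1, 60) -- a limit of 1 request per 60 seconds --
+# so the second request in each test below is expected to be rejected with HTTP 429.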
+@mock.patch("ratelimit.utils._split_rate", return_value=(1, 60))
+@mock.patch("apps.integrations.tasks.create_alert.apply_async", return_value=None)
+@pytest.mark.django_db
+def test_ratelimit_alerts_per_integration(
+ mocked_task,
+ mocked_rate,
+ make_organization,
+ make_alert_receive_channel,
+):
+ organization = make_organization()
+ integration = make_alert_receive_channel(organization, integration=AlertReceiveChannel.INTEGRATION_WEBHOOK)
+ url = reverse(
+ "integrations:universal",
+ kwargs={"integration_type": AlertReceiveChannel.INTEGRATION_WEBHOOK, "alert_channel_key": integration.token},
+ )
+
+ c = Client()
+
+ response = c.post(url, data={"message": "This is the test alert from amixr"})
+ assert response.status_code == 200
+ response = c.post(url, data={"message": "This is the test alert from amixr"})
+ assert response.status_code == 429
+
+ assert mocked_task.call_count == 1
+
+
+@pytest.mark.skip(reason="SQLITE Incompatibility")
+@mock.patch("ratelimit.utils._split_rate", return_value=(1, 60))
+@mock.patch("apps.integrations.tasks.create_alert.apply_async", return_value=None)
+@pytest.mark.django_db
+def test_ratelimit_alerts_per_team(
+ mocked_task,
+ mocked_rate,
+ make_organization,
+ make_alert_receive_channel,
+):
+ organization = make_organization()
+ integration_1 = make_alert_receive_channel(organization, integration=AlertReceiveChannel.INTEGRATION_WEBHOOK)
+ url_1 = reverse("integrations:webhook", kwargs={"alert_channel_key": integration_1.token})
+ integration_2 = make_alert_receive_channel(organization, integration=AlertReceiveChannel.INTEGRATION_WEBHOOK)
+
+ url_2 = reverse("integrations:webhook", kwargs={"alert_channel_key": integration_2.token})
+
+ c = Client()
+
+ response = c.post(url_1, data={"message": "This is the test alert from amixr"})
+ assert response.status_code == 200
+
+ response = c.post(url_2, data={"message": "This is the test alert from amixr"})
+ assert response.status_code == 429
+
+ assert mocked_task.call_count == 1
+
+
+@pytest.mark.skip(reason="SQLITE Incompatibility")
+@mock.patch("ratelimit.utils._split_rate", return_value=(1, 60))
+@mock.patch("apps.heartbeat.tasks.process_heartbeat_task.apply_async", return_value=None)
+@pytest.mark.django_db
+def test_ratelimit_integration_heartbeats(
+ mocked_task,
+ mocked_rate,
+ make_organization,
+ make_alert_receive_channel,
+):
+ organization = make_organization()
+ integration = make_alert_receive_channel(organization, integration=AlertReceiveChannel.INTEGRATION_WEBHOOK)
+ url = reverse("integrations:webhook_heartbeat", kwargs={"alert_channel_key": integration.token})
+
+ c = Client()
+
+ response = c.post(url)
+ assert response.status_code == 200
+
+ response = c.post(url)
+ assert response.status_code == 429
+
+ response = c.get(url)
+ assert response.status_code == 429
+
+ assert mocked_task.call_count == 1
diff --git a/engine/apps/integrations/tests/test_tasks.py b/engine/apps/integrations/tests/test_tasks.py
new file mode 100644
index 0000000000..93e766ab25
--- /dev/null
+++ b/engine/apps/integrations/tests/test_tasks.py
@@ -0,0 +1,31 @@
+import pytest
+
+from apps.alerts.models import Alert, AlertReceiveChannel
+from apps.integrations.tasks import create_alertmanager_alerts
+
+
+@pytest.mark.django_db
+def test_create_alertmanager_alert_deleted_task_no_alert_no_retry(
+ make_organization,
+ make_alert_receive_channel,
+):
+ organization = make_organization()
+ integration = make_alert_receive_channel(organization, integration=AlertReceiveChannel.INTEGRATION_WEBHOOK)
+ integration.delete()
+
+ create_alertmanager_alerts(integration.pk, {})
+
+ assert Alert.objects.count() == 0
+
+
+@pytest.mark.django_db
+def test_create_alertmanager_alert_maintenance_task_no_alert_no_retry(
+ make_organization,
+ make_alert_receive_channel,
+):
+ organization = make_organization()
+ integration = make_alert_receive_channel(organization, integration=AlertReceiveChannel.INTEGRATION_MAINTENANCE)
+
+ create_alertmanager_alerts(integration.pk, {})
+
+ assert Alert.objects.count() == 0
diff --git a/engine/apps/integrations/tests/test_views.py b/engine/apps/integrations/tests/test_views.py
new file mode 100644
index 0000000000..debc7ad6a8
--- /dev/null
+++ b/engine/apps/integrations/tests/test_views.py
@@ -0,0 +1,46 @@
+import pytest
+from django.urls import reverse
+from rest_framework import status
+from rest_framework.test import APIClient
+
+from apps.alerts.models import AlertReceiveChannel
+
+
+@pytest.mark.django_db
+def test_integration_json_data_too_big(settings, make_organization, make_user, make_alert_receive_channel):
+ settings.DATA_UPLOAD_MAX_MEMORY_SIZE = 50
+
+ organization = make_organization()
+ user = make_user(organization=organization)
+ alert_receive_channel = make_alert_receive_channel(
+ organization=organization,
+ author=user,
+ integration=AlertReceiveChannel.INTEGRATION_ALERTMANAGER,
+ )
+
+ client = APIClient()
+ url = reverse("integrations:alertmanager", kwargs={"alert_channel_key": alert_receive_channel.token})
+
+ data = {"value": "a" * settings.DATA_UPLOAD_MAX_MEMORY_SIZE}
+ response = client.post(url, data, format="json")
+ assert response.status_code == status.HTTP_400_BAD_REQUEST
+
+
+@pytest.mark.django_db
+def test_integration_form_data_too_big(settings, make_organization, make_user, make_alert_receive_channel):
+ settings.DATA_UPLOAD_MAX_MEMORY_SIZE = 50
+
+ organization = make_organization()
+ user = make_user(organization=organization)
+ alert_receive_channel = make_alert_receive_channel(
+ organization=organization,
+ author=user,
+ integration=AlertReceiveChannel.INTEGRATION_ALERTMANAGER,
+ )
+
+ client = APIClient()
+ url = reverse("integrations:alertmanager", kwargs={"alert_channel_key": alert_receive_channel.token})
+
+ data = {"value": "a" * settings.DATA_UPLOAD_MAX_MEMORY_SIZE}
+ response = client.post(url, data, content_type="application/x-www-form-urlencoded")
+ assert response.status_code == status.HTTP_400_BAD_REQUEST
diff --git a/engine/apps/integrations/urls.py b/engine/apps/integrations/urls.py
new file mode 100644
index 0000000000..aba3bab20d
--- /dev/null
+++ b/engine/apps/integrations/urls.py
@@ -0,0 +1,46 @@
+from pathlib import Path
+
+from django.urls import path
+
+from .views import (
+ AlertManagerAPIView,
+ AmazonSNS,
+ GrafanaAlertingAPIView,
+ GrafanaAPIView,
+ HeartBeatAPIView,
+ InboundWebhookEmailView,
+ IntegrationHeartBeatAPIView,
+ UniversalAPIView,
+)
+
+app_name = "integrations"
+
+# Check filenames in integrations/metadata/heartbeat for available integrations.
+p = Path(__file__).parent.absolute()
+PATH_TO_HEARTBEAT_DATA_DIR = p / "metadata/heartbeat"
+INTEGRATIONS_WITH_HEARTBEAT_AVAILABLE = {
+ f.stem
+ for f in Path.iterdir(PATH_TO_HEARTBEAT_DATA_DIR)
+ if Path.is_file(PATH_TO_HEARTBEAT_DATA_DIR / f) and not f.name.startswith("_")
+}
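+# Resulting set looks like e.g. {"alertmanager", "grafana", "webhook"}, depending on the files present.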
+# Don't forget to update model-url map in apps/alerts/models.py, AlertReceiveChannel, INTEGRATIONS_TO_REVERSE_URL_MAP
+urlpatterns = [
+ path("grafana//", GrafanaAPIView.as_view(), name="grafana"),
+ path("grafana_alerting//", GrafanaAlertingAPIView.as_view(), name="grafana_alerting"),
+ path("alertmanager//", AlertManagerAPIView.as_view(), name="alertmanager"),
+ path("inbound_webhook_email/", InboundWebhookEmailView.as_view(), name="inbound_email"),
+ path("amazon_sns//", AmazonSNS.as_view(), name="amazon_sns"),
+ path("heartbeat//", HeartBeatAPIView.as_view(), name="heartbeat"),
+ path("//", UniversalAPIView.as_view(), name="universal"),
+]
+
+
+def create_heartbeat_path(integration_url):
+ return path(
+ f"{integration_url}//heartbeat/",
+ IntegrationHeartBeatAPIView.as_view(),
+ name=f"{integration_url}_heartbeat",
+ )
+
+
+urlpatterns += [create_heartbeat_path(integration_url) for integration_url in INTEGRATIONS_WITH_HEARTBEAT_AVAILABLE]
diff --git a/engine/apps/integrations/views.py b/engine/apps/integrations/views.py
new file mode 100644
index 0000000000..1aa164b432
--- /dev/null
+++ b/engine/apps/integrations/views.py
@@ -0,0 +1,467 @@
+import json
+import logging
+from urllib.parse import urljoin
+
+from django.apps import apps
+from django.conf import settings
+from django.core.exceptions import PermissionDenied
+from django.db import transaction
+from django.db.utils import IntegrityError
+from django.http import HttpResponse, HttpResponseBadRequest, JsonResponse
+from django.template import loader
+from django.utils import timezone
+from django.utils.decorators import method_decorator
+from django.views.decorators.csrf import csrf_exempt
+from django_sns_view.views import SNSEndpoint
+from rest_framework.response import Response
+from rest_framework.views import APIView
+
+from apps.alerts.models import AlertReceiveChannel
+from apps.heartbeat.tasks import heartbeat_checkup, process_heartbeat_task
+from apps.integrations.mixins import (
+ AlertChannelDefiningMixin,
+ BrowsableInstructionMixin,
+ IntegrationHeartBeatRateLimitMixin,
+ IntegrationRateLimitMixin,
+ is_ratelimit_ignored,
+)
+from apps.integrations.tasks import create_alert, create_alertmanager_alerts
+from apps.sendgridapp.parse import Parse
+from apps.sendgridapp.permissions import AllowOnlySendgrid
+
+logger = logging.getLogger(__name__)
+
+
+class AmazonSNS(BrowsableInstructionMixin, SNSEndpoint):
+ @method_decorator(csrf_exempt)
+ def dispatch(self, *args, **kwargs):
+ # Cleaning for SNSEndpoint
+ args[0].alert_channel_key = kwargs["alert_channel_key"]
+ del kwargs["alert_channel_key"]
+ # For browsable API
+ if args[0].method == "GET":
+ args = (args[0], args[0].alert_channel_key)
+
+ try:
+ return super(SNSEndpoint, self).dispatch(*args, **kwargs)
+ except Exception as e:
+ logger.warning(f"AmazonSNS dispatch error: {e}")
+ return JsonResponse(status=400, data={})
+
+ def handle_message(self, message, payload):
+ try:
+ alert_receive_channel = AlertReceiveChannel.objects.get(token=self.request.alert_channel_key)
+ except AlertReceiveChannel.DoesNotExist:
+ raise PermissionDenied("Integration key was not found. Permission denied.")
+
+ if type(message) is str:
+ try:
+ message = json.loads(message)
+ except json.JSONDecodeError:
+ pass
+ if type(message) is dict:
+ # Here we expect a CloudWatch or Elastic Beanstalk payload
+ message_text = "*State: {}*\n".format(message.get("NewStateValue", "NO"))
+ message_text += "Region: {}\n".format(message.get("Region", "Undefined"))
+ if "AlarmDescription" in message and message.get("AlarmDescription"):
+ message_text += "_Description:_ {}\n".format(message.get("AlarmDescription", "Undefined"))
+ message_text += message.get("NewStateReason", "")
+
+ region = payload.get("TopicArn").split(":")[3]
+ if message.get("Trigger", {}).get("Namespace") == "AWS/ElasticBeanstalk":
+ link_to_upstream = "https://console.aws.amazon.com/elasticbeanstalk/home?region={}".format(region)
+ else:
+ link_to_upstream = "https://console.aws.amazon.com/cloudwatch//home?region={}".format(region)
+
+ raw_request_data = message
+ title = message.get("AlarmName", "Alert")
+ else:
+ docs_amazon_sns_url = urljoin(settings.DOCS_URL, "/#/integrations/amazon_sns")
+ title = "Alert"
+ message_text = (
+ "Non-JSON payload received. Please make sure you publish monitoring Alarms to SNS,"
+ f" not logs: {docs_amazon_sns_url}\n" + message
+ )
+ link_to_upstream = None
+ raw_request_data = {"message": message}
+
+ create_alert.apply_async(
+ [],
+ {
+ "title": title,
+ "message": message_text,
+ "image_url": None,
+ "link_to_upstream_details": link_to_upstream,
+ "alert_receive_channel_pk": alert_receive_channel.pk,
+ "integration_unique_data": None,
+ "raw_request_data": raw_request_data,
+ },
+ )
+
+
+class AlertManagerAPIView(
+ BrowsableInstructionMixin,
+ AlertChannelDefiningMixin,
+ IntegrationRateLimitMixin,
+ APIView,
+):
+ def post(self, request, alert_receive_channel):
+ """
+ AlertManager requires a fast response, so alerts are created in a Celery task;
+ otherwise AlertManager raises a `context deadline exceeded` exception.
+ Unfortunately, this HTTP timeout is not configurable on AlertManager's side.
+ """
+ if not self.check_integration_type(alert_receive_channel):
+ return HttpResponseBadRequest(
+ f"This url is for integration with {alert_receive_channel.get_integration_display()}. Key is for "
+ + str(alert_receive_channel.get_integration_display())
+ )
+
+ for alert in request.data.get("alerts", []):
+ if settings.DEBUG:
+ create_alertmanager_alerts(alert_receive_channel.pk, alert)
+ else:
+ self.execute_rate_limit_with_notification_logic()
+
+ if self.request.limited and not is_ratelimit_ignored(alert_receive_channel):
+ return self.get_ratelimit_http_response()
+
+ create_alertmanager_alerts.apply_async((alert_receive_channel.pk, alert))
+
+ return Response("Ok.")
+
+ def check_integration_type(self, alert_receive_channel):
+ return alert_receive_channel.integration == AlertReceiveChannel.INTEGRATION_ALERTMANAGER
+
+
+class GrafanaAlertingAPIView(AlertManagerAPIView):
+ """Grafana Alerting has the same payload structure as AlertManager"""
+
+ def check_integration_type(self, alert_receive_channel):
+ return alert_receive_channel.integration == AlertReceiveChannel.INTEGRATION_GRAFANA_ALERTING
+
+
+class GrafanaAPIView(AlertManagerAPIView):
+ """Support both new and old versions of Grafana Alerting"""
+
+ def post(self, request, alert_receive_channel):
+ # New Grafana has the same payload structure as AlertManager
+ if "alerts" in request.data:
+ return super().post(request, alert_receive_channel)
+
+ """
+ Example of request.data from old Grafana:
+ {
+ 'evalMatches': [{
+ 'value': 100,
+ 'metric': 'High value',
+ 'tags': None
+ }, {
+ 'value': 200,
+ 'metric': 'Higher Value',
+ 'tags': None
+ }],
+ 'imageUrl': 'http://grafana.org/assets/img/blog/mixed_styles.png',
+ 'message': 'Someone is testing the alert notification within grafana.',
+ 'ruleId': 0,
+ 'ruleName': 'Test notification',
+ 'ruleUrl': 'http://localhost:3000/',
+ 'state': 'alerting',
+ 'title': '[Alerting] Test notification'
+ }
+ """
+ if not self.check_integration_type(alert_receive_channel):
+ return HttpResponseBadRequest(
+ "This url is for integration with Grafana. Key is for "
+ + str(alert_receive_channel.get_integration_display())
+ )
+
+ if "attachments" in request.data:
+ # Fallback in case user by mistake configured Slack url instead of webhook
+ """
+ {
+ "parse": "full",
+ "channel": "#dev",
+ "attachments": [
+ {
+ "ts": 1549259302,
+ "text": " ",
+ "color": "#D63232",
+ "title": "[Alerting] Test server RAM Usage alert",
+ "fields": [
+ {
+ "short": true,
+ "title": "System",
+ "value": 1563850717.2881355
+ }
+ ],
+ "footer": "Grafana v5.4.3",
+ "fallback": "[Alerting] Test server RAM Usage alert",
+ "image_url": "",
+ "title_link": "http://abc",
+ "footer_icon": "https://grafana.com/assets/img/fav32.png"
+ }
+ ]
+ }
+ """
+ attachment = request.data["attachments"][0]
+
+ create_alert.apply_async(
+ [],
+ {
+ "title": attachment.get("title", "Title"),
+ "message": "_FYI: Misconfiguration detected. Please switch integration type from Slack to WebHook in "
+ "Grafana._\n_Integration URL: {} _\n\n".format(alert_receive_channel.integration_url)
+ + attachment.get("text", ""),
+ "image_url": attachment.get("image_url", None),
+ "link_to_upstream_details": attachment.get("title_link", None),
+ "alert_receive_channel_pk": alert_receive_channel.pk,
+ "integration_unique_data": json.dumps(
+ {
+ "evalMatches": [
+ {"metric": value["title"], "value": str(value["value"])}
+ for value in attachment["fields"]
+ ]
+ }
+ ),
+ "raw_request_data": request.data,
+ },
+ )
+ else:
+ create_alert.apply_async(
+ [],
+ {
+ "title": request.data.get("title", "Title"),
+ "message": request.data.get("message", None),
+ "image_url": request.data.get("imageUrl", None),
+ "link_to_upstream_details": request.data.get("ruleUrl", None),
+ "alert_receive_channel_pk": alert_receive_channel.pk,
+ "integration_unique_data": json.dumps({"evalMatches": request.data.get("evalMatches", [])}),
+ "raw_request_data": request.data,
+ },
+ )
+ return Response("Ok.")
+
+ def check_integration_type(self, alert_receive_channel):
+ return alert_receive_channel.integration == AlertReceiveChannel.INTEGRATION_GRAFANA
+
+
+class UniversalAPIView(BrowsableInstructionMixin, AlertChannelDefiningMixin, IntegrationRateLimitMixin, APIView):
+ def post(self, request, alert_receive_channel, *args, **kwargs):
+ if not alert_receive_channel.config.slug == kwargs["integration_type"]:
+ return HttpResponseBadRequest(
+ f"This url is for integration with {alert_receive_channel.config.title}."
+ f"Key is for {alert_receive_channel.get_integration_display()}"
+ )
+ create_alert.apply_async(
+ [],
+ {
+ "title": None,
+ "message": None,
+ "image_url": None,
+ "link_to_upstream_details": None,
+ "alert_receive_channel_pk": alert_receive_channel.pk,
+ "integration_unique_data": None,
+ "raw_request_data": request.data,
+ },
+ )
+ return Response("Ok.")
+
+
+# TODO: restore HeartBeatAPIView integration or clean it up as it is not used now
+class HeartBeatAPIView(AlertChannelDefiningMixin, APIView):
+ def get(self, request, alert_receive_channel):
+ template = loader.get_template("heartbeat_link.html")
+ docs_url = urljoin(settings.DOCS_URL, "/#/integrations/heartbeat")
+ return HttpResponse(
+ template.render(
+ {
+ "docs_url": docs_url,
+ }
+ )
+ )
+
+ def post(self, request, alert_receive_channel):
+ HeartBeat = apps.get_model("heartbeat", "HeartBeat")
+
+ if request.data.get("action") == "activate":
+ # timeout_seconds
+ timeout_seconds = request.data.get("timeout_seconds")
+ try:
+ timeout_seconds = int(timeout_seconds)
+ except (TypeError, ValueError):  # TypeError covers a missing (None) value
+ timeout_seconds = None
+
+ if timeout_seconds is None:
+ return Response(status=400, data="timeout_seconds int expected")
+ # id
+ _id = request.data.get("id", "default")
+ # title
+ title = request.data.get("title", "Title")
+ # link
+ link = request.data.get("link")
+ # message
+ message = request.data.get("message")
+
+ heartbeat = HeartBeat(
+ alert_receive_channel=alert_receive_channel,
+ timeout_seconds=timeout_seconds,
+ title=title,
+ message=message,
+ link=link,
+ user_defined_id=_id,
+ last_heartbeat_time=timezone.now(),
+ last_checkup_task_time=timezone.now(),
+ actual_check_up_task_id="none",
+ )
+ try:
+ heartbeat.save()
+ with transaction.atomic():
+ heartbeat = HeartBeat.objects.filter(pk=heartbeat.pk).select_for_update()[0]
+ task = heartbeat_checkup.apply_async(
+ (heartbeat.pk,),
+ countdown=heartbeat.timeout_seconds,
+ )
+ heartbeat.actual_check_up_task_id = task.id
+ heartbeat.save()
+ except IntegrityError:
+ return Response(status=400, data="id should be unique")
+
+ elif request.data.get("action") == "deactivate":
+ _id = request.data.get("id", "default")
+ try:
+ heartbeat = HeartBeat.objects.filter(
+ alert_receive_channel=alert_receive_channel,
+ user_defined_id=_id,
+ ).get()
+ heartbeat.delete()
+ except HeartBeat.DoesNotExist:
+ return Response(status=400, data="heartbeat not found")
+
+ elif request.data.get("action") == "list":
+ result = []
+ heartbeats = HeartBeat.objects.filter(
+ alert_receive_channel=alert_receive_channel,
+ ).all()
+ for heartbeat in heartbeats:
+ result.append(
+ {
+ "created_at": heartbeat.created_at,
+ "last_heartbeat": heartbeat.last_heartbeat_time,
+ "expiration_time": heartbeat.expiration_time,
+ "is_expired": heartbeat.is_expired,
+ "id": heartbeat.user_defined_id,
+ "title": heartbeat.title,
+ "timeout_seconds": heartbeat.timeout_seconds,
+ "link": heartbeat.link,
+ "message": heartbeat.message,
+ }
+ )
+ return Response(result)
+
+ elif request.data.get("action") == "heartbeat":
+ _id = request.data.get("id", "default")
+ with transaction.atomic():
+ try:
+ heartbeat = HeartBeat.objects.filter(
+ alert_receive_channel=alert_receive_channel,
+ user_defined_id=_id,
+ ).select_for_update()[0]
+ task = heartbeat_checkup.apply_async(
+ (heartbeat.pk,),
+ countdown=heartbeat.timeout_seconds,
+ )
+ heartbeat.actual_check_up_task_id = task.id
+ heartbeat.last_heartbeat_time = timezone.now()
+ update_fields = ["actual_check_up_task_id", "last_heartbeat_time"]
+ state_changed = heartbeat.check_heartbeat_state()
+ if state_changed:
+ update_fields.append("previous_alerted_state_was_life")
+ heartbeat.save(update_fields=update_fields)
+ except IndexError:
+ return Response(status=400, data="heartbeat not found")
+ return Response("Ok.")
+
+
+class InboundWebhookEmailView(AlertChannelDefiningMixin, APIView):
+ permission_classes = [AllowOnlySendgrid]
+
+ def dispatch(self, *args, **kwargs):
+ parse = Parse(self.request)
+ self.email_data = parse.key_values()
+ # When an email is forwarded, the recipient can be stored in either the "to" or the "envelope" field.
+ token_from_to = self._parse_token_from_to(self.email_data)
+ try:
+ kwargs["alert_channel_key"] = token_from_to
+ return super().dispatch(*args, **kwargs)
+ except KeyError as e:
+ logger.warning(f"InboundWebhookEmailView: {e}")
+ except PermissionDenied as e:
+ self._log_permission_denied(token_from_to, e)
+ kwargs.pop("alert_channel_key")
+
+ token_from_envelope = self._parse_token_from_envelope(self.email_data)
+ try:
+ kwargs["alert_channel_key"] = token_from_envelope
+ return super().dispatch(*args, **kwargs)
+ except KeyError as e:
+ logger.warning(f"InboundWebhookEmailView: {e}")
+ except PermissionDenied as e:
+ self._log_permission_denied(token_from_envelope, e)
+ kwargs.pop("alert_channel_key")
+
+ raise PermissionDenied("Integration key was not found. Permission denied.")
+
+ def _log_permission_denied(self, token, e):
+ logger.info(
+ f"InboundWebhookEmailView: Permission denied. token {token}. "
+ f"To {self.email_data.get('to')}. "
+ f"Envelope {self.email_data.get('envelope')}."
+ f"Exception: {e}"
+ )
+
+ def _parse_token_from_envelope(self, email_data):
+ envelope = email_data["envelope"]
+ envelope = json.loads(envelope)
+ token = envelope.get("to")[0].split("@")[0]
+ return token
+
+ def _parse_token_from_to(self, email_data):
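+ # e.g. "sometoken@inbound.example.com" -> "sometoken" (address format is illustrative)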
+ return email_data["to"].split("@")[0]
+
+ def post(self, request, alert_receive_channel=None):
+ title = self.email_data["subject"]
+ message = self.email_data.get("text", "").strip()
+
+ payload = {"title": title, "message": message}
+
+ if alert_receive_channel:
+ create_alert.apply_async(
+ [],
+ {
+ "title": title,
+ "message": message,
+ "alert_receive_channel_pk": alert_receive_channel.pk,
+ "image_url": None,
+ "link_to_upstream_details": payload.get("link_to_upstream_details"),
+ "integration_unique_data": payload,
+ "raw_request_data": request.data,
+ },
+ )
+
+ return Response("OK")
+
+
+class IntegrationHeartBeatAPIView(AlertChannelDefiningMixin, IntegrationHeartBeatRateLimitMixin, APIView):
+ def get(self, request, alert_receive_channel):
+ self._process_heartbeat_signal(request, alert_receive_channel)
+ return Response(":)")
+
+ def post(self, request, alert_receive_channel):
+ self._process_heartbeat_signal(request, alert_receive_channel)
+ return Response(status=200)
+
+ def _process_heartbeat_signal(self, request, alert_receive_channel):
+ process_heartbeat_task.apply_async(
+ (alert_receive_channel.pk,),
+ )
diff --git a/engine/apps/migration_tool/__init__.py b/engine/apps/migration_tool/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/engine/apps/migration_tool/constants.py b/engine/apps/migration_tool/constants.py
new file mode 100644
index 0000000000..4ab4377d82
--- /dev/null
+++ b/engine/apps/migration_tool/constants.py
@@ -0,0 +1,7 @@
+# Amixr API URL
+REQUEST_URL = "https://amixr.io/api/v1"
+
+# migration status
+NOT_STARTED = "not_started"
+IN_PROGRESS = "in_progress"
+FINISHED = "finished"
diff --git a/engine/apps/migration_tool/migrations/0001_squashed_initial.py b/engine/apps/migration_tool/migrations/0001_squashed_initial.py
new file mode 100644
index 0000000000..3772704d14
--- /dev/null
+++ b/engine/apps/migration_tool/migrations/0001_squashed_initial.py
@@ -0,0 +1,33 @@
+# Generated by Django 3.2.5 on 2022-05-31 14:46
+
+from django.db import migrations, models
+import django.db.models.deletion
+
+
+class Migration(migrations.Migration):
+
+ initial = True
+
+ dependencies = [
+ ('alerts', '0001_squashed_initial'),
+ ]
+
+ operations = [
+ migrations.CreateModel(
+ name='AmixrMigrationTaskStatus',
+ fields=[
+ ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
+ ('task_id', models.CharField(db_index=True, max_length=500)),
+ ('name', models.CharField(max_length=500)),
+ ('started_at', models.DateTimeField(auto_now_add=True)),
+ ('is_finished', models.BooleanField(default=False)),
+ ],
+ ),
+ migrations.CreateModel(
+ name='LockedAlert',
+ fields=[
+ ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
+ ('alert', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='migrator_lock', to='alerts.alert')),
+ ],
+ ),
+ ]
diff --git a/engine/apps/migration_tool/migrations/0002_amixrmigrationtaskstatus_organization.py b/engine/apps/migration_tool/migrations/0002_amixrmigrationtaskstatus_organization.py
new file mode 100644
index 0000000000..8480acddef
--- /dev/null
+++ b/engine/apps/migration_tool/migrations/0002_amixrmigrationtaskstatus_organization.py
@@ -0,0 +1,22 @@
+# Generated by Django 3.2.5 on 2022-05-31 14:46
+
+from django.db import migrations, models
+import django.db.models.deletion
+
+
+class Migration(migrations.Migration):
+
+ initial = True
+
+ dependencies = [
+ ('user_management', '0001_squashed_initial'),
+ ('migration_tool', '0001_squashed_initial'),
+ ]
+
+ operations = [
+ migrations.AddField(
+ model_name='amixrmigrationtaskstatus',
+ name='organization',
+ field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='migration_tasks', to='user_management.organization'),
+ ),
+ ]
diff --git a/engine/apps/migration_tool/migrations/__init__.py b/engine/apps/migration_tool/migrations/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/engine/apps/migration_tool/models/__init__.py b/engine/apps/migration_tool/models/__init__.py
new file mode 100644
index 0000000000..c7478eab1b
--- /dev/null
+++ b/engine/apps/migration_tool/models/__init__.py
@@ -0,0 +1,2 @@
+from .amixr_migration_task_status import AmixrMigrationTaskStatus # noqa: F401
+from .locked_alert import LockedAlert # noqa: F401
diff --git a/engine/apps/migration_tool/models/amixr_migration_task_status.py b/engine/apps/migration_tool/models/amixr_migration_task_status.py
new file mode 100644
index 0000000000..543013d707
--- /dev/null
+++ b/engine/apps/migration_tool/models/amixr_migration_task_status.py
@@ -0,0 +1,27 @@
+from celery import uuid as celery_uuid
+from django.db import models
+
+
+class AmixrMigrationTaskStatusQuerySet(models.QuerySet):
+ def get_migration_task_id(self, organization_id, name):
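+ # Pre-generate the Celery task id so the status row exists before the task is enqueued;
+ # callers pass this id to apply_async(task_id=...) and mark the row finished when the task ends.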
+ task_id = celery_uuid()
+ self.model(organization_id=organization_id, name=name, task_id=task_id).save()
+ return task_id
+
+
+class AmixrMigrationTaskStatus(models.Model):
+ objects = AmixrMigrationTaskStatusQuerySet.as_manager()
+
+ task_id = models.CharField(max_length=500, db_index=True)
+ name = models.CharField(max_length=500)
+ organization = models.ForeignKey(
+ to="user_management.Organization",
+ related_name="migration_tasks",
+ on_delete=models.deletion.CASCADE,
+ )
+ started_at = models.DateTimeField(auto_now_add=True)
+ is_finished = models.BooleanField(default=False)
+
+ def update_status_to_finished(self):
+ self.is_finished = True
+ self.save(update_fields=["is_finished"])
diff --git a/engine/apps/migration_tool/models/locked_alert.py b/engine/apps/migration_tool/models/locked_alert.py
new file mode 100644
index 0000000000..8771c6ce18
--- /dev/null
+++ b/engine/apps/migration_tool/models/locked_alert.py
@@ -0,0 +1,5 @@
+from django.db import models
+
+
+class LockedAlert(models.Model):
+ alert = models.OneToOneField("alerts.Alert", on_delete=models.CASCADE, related_name="migrator_lock")
diff --git a/engine/apps/migration_tool/tasks.py b/engine/apps/migration_tool/tasks.py
new file mode 100644
index 0000000000..23ceff21a1
--- /dev/null
+++ b/engine/apps/migration_tool/tasks.py
@@ -0,0 +1,612 @@
+import logging
+
+from celery.utils.log import get_task_logger
+from django.apps import apps
+from django.conf import settings
+from django.db import transaction
+from django.utils import timezone
+from rest_framework import exceptions
+
+from apps.alerts.models import Alert, AlertGroup, AlertReceiveChannel, ResolutionNote
+from apps.migration_tool.models import AmixrMigrationTaskStatus, LockedAlert
+from apps.migration_tool.utils import convert_string_to_datetime, get_data_with_respect_to_pagination
+from apps.public_api.serializers import PersonalNotificationRuleSerializer
+from common.custom_celery_tasks import shared_dedicated_queue_retry_task
+
+logger = get_task_logger(__name__)
+logger.setLevel(logging.DEBUG)
+
+
+@shared_dedicated_queue_retry_task(
+ autoretry_for=(Exception,), retry_backoff=True, max_retries=0 if settings.DEBUG else None
+)
+def start_migration_from_old_amixr(api_token, organization_id, user_id):
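+ # Fan-out entry point. Each step schedules the next: schedules -> integrations -> routes ->
+ # escalation policies / alert groups -> alerts and logs; user data is migrated in parallel.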
+ logger.info(f"Start migration task from amixr for organization {organization_id}")
+ users = get_users(organization_id, api_token)
+
+ migrate_schedules_task_id = AmixrMigrationTaskStatus.objects.get_migration_task_id(
+ organization_id=organization_id, name=migrate_schedules.name
+ )
+ migrate_schedules.apply_async(
+ (api_token, organization_id, user_id, users),
+ task_id=migrate_schedules_task_id,
+ countdown=5,
+ )
+
+ start_migration_user_data_task_id = AmixrMigrationTaskStatus.objects.get_migration_task_id(
+ organization_id=organization_id, name=start_migration_user_data.name
+ )
+ start_migration_user_data.apply_async(
+ (api_token, organization_id, users),
+ task_id=start_migration_user_data_task_id,
+ )
+ logger.info(f"Start 'start_migration_from_old_amixr' task for organization {organization_id}")
+
+
+def get_users(organization_id, api_token):
+ Organization = apps.get_model("user_management", "Organization")
+ organization = Organization.objects.get(pk=organization_id)
+ # get all users from old amixr
+ old_users = get_data_with_respect_to_pagination(api_token, "users")
+ old_users_emails = [old_user["email"] for old_user in old_users]
+ # find users in Grafana OnCall by email
+ grafana_users = organization.users.filter(email__in=old_users_emails).values("email", "id")
+
+ grafana_users_dict = {
+ gu["email"]: {
+ "id": gu["id"],
+ }
+ for gu in grafana_users
+ }
+
+ users = {}
+ for old_user in old_users:
+ if old_user["email"] in grafana_users_dict:
+ users[old_user["id"]] = grafana_users_dict[old_user["email"]]
+ users[old_user["id"]]["old_verified_phone_number"] = old_user.get("verified_phone_number")
+ users[old_user["id"]]["old_public_primary_key"] = old_user["id"]
+
+ # Example result:
+ # users = {
+ # "OLD_PUBLIC_PK": {
+ # "id": 1, # user pk in OnCall db
+ # "old_verified_phone_number": "1234",
+ # "old_public_primary_key": "OLD_PUBLIC_PK",
+ # },
+ # ...
+ # }
+ return users
+
+
+@shared_dedicated_queue_retry_task(
+ autoretry_for=(Exception,), retry_backoff=True, max_retries=0 if settings.DEBUG else None
+)
+def migrate_schedules(api_token, organization_id, user_id, users):
+ logger.info(f"Started migration schedules for organization {organization_id}")
+ OnCallScheduleICal = apps.get_model("schedules", "OnCallScheduleICal")
+ Organization = apps.get_model("user_management", "Organization")
+ organization = Organization.objects.get(pk=organization_id)
+
+ schedules = get_data_with_respect_to_pagination(api_token, "schedules")
+ existing_schedules_names = set(organization.oncall_schedules.values_list("name", flat=True))
+ created_schedules = {}
+ for schedule in schedules:
+ if not schedule["ical_url"] or schedule["name"] in existing_schedules_names:
+ continue
+
+ new_schedule = OnCallScheduleICal(
+ organization=organization,
+ name=schedule["name"],
+ ical_url_primary=schedule["ical_url"],
+ team_id=None,
+ )
+
+ new_schedule.save()
+
+ created_schedules[schedule["id"]] = {
+ "id": new_schedule.pk,
+ }
+ # Example result:
+ # created_schedules = {
+ # "OLD_PUBLIC_PK": {
+ # "id": 1, # schedule pk in OnCall db
+ # },
+ # ...
+ # }
+
+ migrate_integrations_task_id = AmixrMigrationTaskStatus.objects.get_migration_task_id(
+ organization_id=organization_id, name=migrate_integrations.name
+ )
+ migrate_integrations.apply_async(
+ (api_token, organization_id, user_id, created_schedules, users), task_id=migrate_integrations_task_id
+ )
+
+ current_task_id = migrate_schedules.request.id
+ AmixrMigrationTaskStatus.objects.get(task_id=current_task_id).update_status_to_finished()
+ logger.info(f"Finished migration schedules for organization {organization_id}")
+
+
+@shared_dedicated_queue_retry_task(
+ autoretry_for=(Exception,), retry_backoff=True, max_retries=0 if settings.DEBUG else None
+)
+def migrate_integrations(api_token, organization_id, user_id, created_schedules, users):
+ logger.info(f"Started migration integrations for organization {organization_id}")
+ Organization = apps.get_model("user_management", "Organization")
+ organization = Organization.objects.get(pk=organization_id)
+
+ integrations = get_data_with_respect_to_pagination(api_token, "integrations")
+
+ existing_integrations_names = set(organization.alert_receive_channels.values_list("verbal_name", flat=True))
+
+ for integration in integrations:
+ if integration["name"] in existing_integrations_names:
+ continue
+
+ try:
+ integration_type = [
+ key
+ for key, value in AlertReceiveChannel.INTEGRATIONS_TO_REVERSE_URL_MAP.items()
+ if value == integration["type"]
+ ][0]
+ except IndexError:
+ continue
+ if integration_type not in AlertReceiveChannel.WEB_INTEGRATION_CHOICES:
+ continue
+
+ new_integration = AlertReceiveChannel.create(
+ organization=organization,
+ verbal_name=integration["name"],
+ integration=integration_type,
+ author_id=user_id,
+ slack_title_template=integration["templates"]["slack"]["title"],
+ slack_message_template=integration["templates"]["slack"]["message"],
+ slack_image_url_template=integration["templates"]["slack"]["image_url"],
+ sms_title_template=integration["templates"]["sms"]["title"],
+ phone_call_title_template=integration["templates"]["phone_call"]["title"],
+ web_title_template=integration["templates"]["web"]["title"],
+ web_message_template=integration["templates"]["web"]["message"],
+ web_image_url_template=integration["templates"]["web"]["image_url"],
+ email_title_template=integration["templates"]["email"]["title"],
+ email_message_template=integration["templates"]["email"]["message"],
+ telegram_title_template=integration["templates"]["telegram"]["title"],
+ telegram_message_template=integration["templates"]["telegram"]["message"],
+ telegram_image_url_template=integration["templates"]["telegram"]["image_url"],
+ grouping_id_template=integration["templates"]["grouping_key"],
+ resolve_condition_template=integration["templates"]["resolve_signal"],
+ acknowledge_condition_template=integration["templates"]["acknowledge_signal"],
+ )
+ # collect integration data in a dict
+ integration_data = {
+ "id": new_integration.pk,
+ "verbal_name": new_integration.verbal_name,
+ "old_public_primary_key": integration["id"],
+ }
+
+ migrate_routes_task_id = AmixrMigrationTaskStatus.objects.get_migration_task_id(
+ organization_id=organization_id, name=migrate_routes.name
+ )
+ migrate_routes.apply_async(
+ (api_token, organization_id, users, created_schedules, integration_data),
+ task_id=migrate_routes_task_id,
+ countdown=3,
+ )
+
+ current_task_id = migrate_integrations.request.id
+ AmixrMigrationTaskStatus.objects.get(task_id=current_task_id).update_status_to_finished()
+ logger.info(f"Finished migration integrations for organization {organization_id}")
+
+
+@shared_dedicated_queue_retry_task(
+ autoretry_for=(Exception,), retry_backoff=True, max_retries=0 if settings.DEBUG else None
+)
+def migrate_routes(api_token, organization_id, users, created_schedules, integration_data):
+ logger.info(f"Start migration routes for organization {organization_id}")
+ AlertReceiveChannel = apps.get_model("alerts", "AlertReceiveChannel")
+ ChannelFilter = apps.get_model("alerts", "ChannelFilter")
+ Organization = apps.get_model("user_management", "Organization")
+ organization = Organization.objects.get(pk=organization_id)
+
+ integration = AlertReceiveChannel.objects.filter(pk=integration_data["id"]).first()
+ if integration:
+ url = "routes?integration_id={}".format(integration_data["old_public_primary_key"])
+ routes = get_data_with_respect_to_pagination(api_token, url)
+
+ default_route = integration.channel_filters.get(is_default=True)
+ existing_chain_names = set(organization.escalation_chains.values_list("name", flat=True))
+ existing_route_filtering_term = set(integration.channel_filters.values_list("filtering_term", flat=True))
+
+ for route in routes:
+ is_default_route = route["is_the_last_route"]
+ filtering_term = route["routing_regex"]
+
+ if is_default_route:
+ escalation_chain_name = f"{integration_data['verbal_name'][:90]} - default"
+ else:
+ if filtering_term in existing_route_filtering_term:
+ continue
+ escalation_chain_name = f"{integration_data['verbal_name']} - {filtering_term}"[:100]
+
+ if escalation_chain_name in existing_chain_names:
+ escalation_chain = organization.escalation_chains.get(name=escalation_chain_name)
+ else:
+ escalation_chain = organization.escalation_chains.create(name=escalation_chain_name)
+
+ if is_default_route:
+ new_route = default_route
+ new_route.escalation_chain = escalation_chain
+ new_route.save(update_fields=["escalation_chain"])
+ else:
+ new_route = ChannelFilter(
+ alert_receive_channel_id=integration_data["id"],
+ escalation_chain_id=escalation_chain.pk,
+ filtering_term=filtering_term,
+ order=route["position"],
+ )
+ new_route.save()
+
+ route_data = {
+ "id": new_route.pk,
+ "old_public_primary_key": route["id"],
+ "escalation_chain": {
+ "id": escalation_chain.pk,
+ },
+ }
+
+ migrate_escalation_policies_task_id = AmixrMigrationTaskStatus.objects.get_migration_task_id(
+ organization_id=organization_id, name=migrate_escalation_policies.name
+ )
+ migrate_escalation_policies.apply_async(
+ (api_token, organization_id, users, created_schedules, route_data),
+ task_id=migrate_escalation_policies_task_id,
+ countdown=2,
+ )
+
+ start_migration_alert_groups_task_id = AmixrMigrationTaskStatus.objects.get_migration_task_id(
+ organization_id=organization_id, name=start_migration_alert_groups.name
+ )
+ start_migration_alert_groups.apply_async(
+ (api_token, organization_id, users, integration_data, route_data),
+ task_id=start_migration_alert_groups_task_id,
+ countdown=10,
+ )
+
+ current_task_id = migrate_routes.request.id
+ AmixrMigrationTaskStatus.objects.get(task_id=current_task_id).update_status_to_finished()
+ logger.info(f"Finished migration routes for organization {organization_id}")
+
+
+@shared_dedicated_queue_retry_task(
+ autoretry_for=(Exception,), retry_backoff=True, max_retries=0 if settings.DEBUG else None
+)
+def migrate_escalation_policies(api_token, organization_id, users, created_schedules, route_data):
+ logger.info(f"Start migration escalation policies for organization {organization_id}")
+ EscalationChain = apps.get_model("alerts", "EscalationChain")
+ EscalationPolicy = apps.get_model("alerts", "EscalationPolicy")
+
+ escalation_chain = EscalationChain.objects.filter(pk=route_data["escalation_chain"]["id"]).first()
+ if escalation_chain and not escalation_chain.escalation_policies.exists():
+
+ url = "escalation_policies?route_id={}".format(route_data["old_public_primary_key"])
+ escalation_policies = get_data_with_respect_to_pagination(api_token, url)
+
+ for escalation_policy in escalation_policies:
+ try:
+ step_type = [
+ key
+ for key, value in EscalationPolicy.PUBLIC_STEP_CHOICES_MAP.items()
+ if value == escalation_policy["type"] and key in EscalationPolicy.PUBLIC_STEP_CHOICES
+ ][0]
+ except IndexError:
+ continue
+
+ if step_type in EscalationPolicy.DEFAULT_TO_IMPORTANT_STEP_MAPPING and escalation_policy.get("important"):
+ step_type = EscalationPolicy.DEFAULT_TO_IMPORTANT_STEP_MAPPING[step_type]
+
+ notify_to_users_queue = []
+
+ if step_type == EscalationPolicy.STEP_NOTIFY_USERS_QUEUE:
+ notify_to_users_queue = [
+ users[user_old_public_pk]["id"]
+ for user_old_public_pk in escalation_policy.get("persons_to_notify_next_each_time", [])
+ if user_old_public_pk in users
+ ]
+ elif step_type in [
+ EscalationPolicy.STEP_NOTIFY_MULTIPLE_USERS,
+ EscalationPolicy.STEP_NOTIFY_MULTIPLE_USERS_IMPORTANT,
+ ]:
+ notify_to_users_queue = [
+ users[user_old_public_pk]["id"]
+ for user_old_public_pk in escalation_policy.get("persons_to_notify", [])
+ if user_old_public_pk in users
+ ]
+
+ if step_type == EscalationPolicy.STEP_NOTIFY_IF_TIME:
+ notify_from_time = timezone.datetime.strptime(
+ escalation_policy.get("notify_if_time_from"), "%H:%M:%SZ"
+ ).time()
+ notify_to_time = timezone.datetime.strptime(
+ escalation_policy.get("notify_if_time_to"), "%H:%M:%SZ"
+ ).time()
+ else:
+ notify_from_time, notify_to_time = None, None
+ duration = escalation_policy.get("duration")
+ wait_delay = timezone.timedelta(seconds=duration) if duration else None
+
+ schedule_id = escalation_policy.get("notify_on_call_from_schedule")
+
+ notify_schedule_id = created_schedules.get(schedule_id, {}).get("id") if schedule_id else None
+
+ new_escalation_policy = EscalationPolicy(
+ step=step_type,
+ order=escalation_policy["position"],
+ escalation_chain=escalation_chain,
+ notify_schedule_id=notify_schedule_id,
+ wait_delay=wait_delay,
+ from_time=notify_from_time,
+ to_time=notify_to_time,
+ )
+
+ new_escalation_policy.save()
+ if notify_to_users_queue:
+ new_escalation_policy.notify_to_users_queue.set(notify_to_users_queue)
+
+ current_task_id = migrate_escalation_policies.request.id
+ AmixrMigrationTaskStatus.objects.get(task_id=current_task_id).update_status_to_finished()
+ logger.info(f"Finished migration escalation policies for organization {organization_id}")
+
+
+@shared_dedicated_queue_retry_task(
+ autoretry_for=(Exception,), retry_backoff=True, max_retries=0 if settings.DEBUG else None
+)
+def start_migration_alert_groups(api_token, organization_id, users, integration_data, route_data):
+ logger.info(f"Start migration alert groups for organization {organization_id}")
+ ChannelFilter = apps.get_model("alerts", "ChannelFilter")
+
+ url = "incidents?route_id={}".format(route_data["old_public_primary_key"])
+ alert_groups = get_data_with_respect_to_pagination(api_token, url)
+
+ route = ChannelFilter.objects.filter(pk=route_data["id"]).first()
+
+ if route and not route.alert_groups.exists():
+ for alert_group in alert_groups:
+
+ migrate_alert_group_task_id = AmixrMigrationTaskStatus.objects.get_migration_task_id(
+ organization_id=organization_id, name=migrate_alert_group.name
+ )
+ migrate_alert_group.apply_async(
+ (api_token, organization_id, users, integration_data, route_data, alert_group),
+ task_id=migrate_alert_group_task_id,
+ )
+
+ current_task_id = start_migration_alert_groups.request.id
+ AmixrMigrationTaskStatus.objects.get(task_id=current_task_id).update_status_to_finished()
+ logger.info(f"Finished 'start_migration_alert_groups' for organization {organization_id}")
+
+
+@shared_dedicated_queue_retry_task(
+ autoretry_for=(Exception,), retry_backoff=True, max_retries=0 if settings.DEBUG else None
+)
+def migrate_alert_group(api_token, organization_id, users, integration_data, route_data, alert_group_to_migrate):
+ logger.info(f"Start migration alert_group {alert_group_to_migrate['id']} for organization {organization_id}")
+ integration = AlertReceiveChannel.objects.get(pk=integration_data["id"])
+ resolve_by_user_id = None
+ acknowledged_by_user_id = None
+
+ if alert_group_to_migrate["resolved_by_user"]:
+ resolve_by_user_id = users.get(alert_group_to_migrate["resolved_by_user"], {}).get("id")
+ if alert_group_to_migrate["acknowledged_by_user"]:
+ acknowledged_by_user_id = users.get(alert_group_to_migrate["acknowledged_by_user"], {}).get("id")
+
+ new_group = AlertGroup.all_objects.create(
+ channel=integration,
+ channel_filter_id=route_data["id"],
+ resolved=True,
+ resolved_by=alert_group_to_migrate["resolved_by"],
+ resolved_by_user_id=resolve_by_user_id,
+ resolved_at=alert_group_to_migrate.get("resolved_at") or timezone.now(),
+ acknowledged=alert_group_to_migrate["acknowledged"],
+ acknowledged_by=alert_group_to_migrate["acknowledged_by"],
+ acknowledged_by_user_id=acknowledged_by_user_id,
+ acknowledged_at=alert_group_to_migrate.get("acknowledged_at"),
+ )
+
+ new_group.started_at = convert_string_to_datetime(alert_group_to_migrate["created_at"])
+ new_group.save(update_fields=["started_at"])
+
+ alert_group_data = {
+ "id": new_group.pk,
+ "old_public_primary_key": alert_group_to_migrate["id"],
+ }
+
+ start_migration_alerts_task_id = AmixrMigrationTaskStatus.objects.get_migration_task_id(
+ organization_id=organization_id, name=start_migration_alerts.name
+ )
+ start_migration_alerts.apply_async(
+ (api_token, organization_id, alert_group_data),
+ task_id=start_migration_alerts_task_id,
+ )
+
+ start_migration_logs_task_id = AmixrMigrationTaskStatus.objects.get_migration_task_id(
+ organization_id=organization_id, name=start_migration_logs.name
+ )
+ start_migration_logs.apply_async(
+ (api_token, organization_id, users, alert_group_data),
+ task_id=start_migration_logs_task_id,
+ countdown=5,
+ )
+
+ current_task_id = migrate_alert_group.request.id
+ AmixrMigrationTaskStatus.objects.get(task_id=current_task_id).update_status_to_finished()
+ logger.info(f"Finished migration alert_group {alert_group_to_migrate['id']} for organization {organization_id}")
+
+
+@shared_dedicated_queue_retry_task(
+ autoretry_for=(Exception,), retry_backoff=True, max_retries=0 if settings.DEBUG else None
+)
+def start_migration_alerts(api_token, organization_id, alert_group_data):
+ logger.info(
+ f"Start migration alerts for alert_group {alert_group_data['old_public_primary_key']} "
+ f"for organization {organization_id}"
+ )
+ AlertGroup = apps.get_model("alerts", "AlertGroup")
+ alert_group = AlertGroup.all_objects.get(pk=alert_group_data["id"])
+ if not alert_group.alerts.exists():
+
+ url = "alerts?incident_id={}".format(alert_group_data["old_public_primary_key"])
+ alerts = get_data_with_respect_to_pagination(api_token, url)
+
+ for alert in alerts:
+ migrate_alerts_task_id = AmixrMigrationTaskStatus.objects.get_migration_task_id(
+ organization_id=organization_id, name=migrate_alert.name
+ )
+ migrate_alert.apply_async(
+ (organization_id, alert_group_data, alert),
+ task_id=migrate_alerts_task_id,
+ )
+
+ current_task_id = start_migration_alerts.request.id
+ AmixrMigrationTaskStatus.objects.get(task_id=current_task_id).update_status_to_finished()
+ logger.info(
+ f"Finished 'start_migration_alerts' for alert_group {alert_group_data['old_public_primary_key']} "
+ f"for organization {organization_id}"
+ )
+
+
+@shared_dedicated_queue_retry_task(
+ autoretry_for=(Exception,), retry_backoff=True, max_retries=0 if settings.DEBUG else None
+)
+def migrate_alert(organization_id, alert_group_data, alert):
+ logger.info(f"Start migration alert {alert['id']} for organization {organization_id}")
+ with transaction.atomic():
+ new_alert = Alert(
+ title=alert["title"],
+ message=alert["message"],
+ image_url=alert["image_url"],
+ link_to_upstream_details=alert["link_to_upstream_details"],
+ group_id=alert_group_data["id"],
+ integration_unique_data=alert["payload"],
+ raw_request_data=alert["payload"],
+ )
+ new_alert.save()
+ LockedAlert.objects.create(alert=new_alert)
+ new_alert.created_at = convert_string_to_datetime(alert["created_at"])
+ new_alert.save(update_fields=["created_at"])
+
+ current_task_id = migrate_alert.request.id
+ AmixrMigrationTaskStatus.objects.get(task_id=current_task_id).update_status_to_finished()
+ logger.info(f"Finished migration alert {alert['id']} for organization {organization_id}")
+
+
+@shared_dedicated_queue_retry_task(
+ autoretry_for=(Exception,), retry_backoff=True, max_retries=0 if settings.DEBUG else None
+)
+def start_migration_logs(api_token, organization_id, users, alert_group_data):
+ logger.info(f"Start migration logs for alert_group {alert_group_data['id']} for organization {organization_id}")
+ url = "incident_logs?incident_id={}".format(alert_group_data["old_public_primary_key"])
+ alert_group_logs = get_data_with_respect_to_pagination(api_token, url)
+
+ for log in alert_group_logs:
+ migrate_logs_task_id = AmixrMigrationTaskStatus.objects.get_migration_task_id(
+ organization_id=organization_id, name=migrate_log.name
+ )
+ migrate_log.apply_async(
+ (organization_id, users, alert_group_data, log),
+ task_id=migrate_logs_task_id,
+ )
+
+ current_task_id = start_migration_logs.request.id
+ AmixrMigrationTaskStatus.objects.get(task_id=current_task_id).update_status_to_finished()
+ logger.info(
+ f"Finished 'start_migration_logs' for alert_group {alert_group_data['id']} for organization {organization_id}"
+ )
+
+
+@shared_dedicated_queue_retry_task(
+ autoretry_for=(Exception,), retry_backoff=True, max_retries=0 if settings.DEBUG else None
+)
+def migrate_log(organization_id, users, alert_group_data, log):
+ logger.info(f"Start migration log for alert_group {alert_group_data['id']} for organization {organization_id}")
+ log_author_id = users.get(log["author"], {}).get("id")
+ new_resolution_note = ResolutionNote(
+ author_id=log_author_id,
+ message_text=log["text"],
+ alert_group_id=alert_group_data["id"],
+ )
+ new_resolution_note.save()
+ new_resolution_note.created_at = convert_string_to_datetime(log["created_at"])
+ new_resolution_note.save(update_fields=["created_at"])
+
+ current_task_id = migrate_log.request.id
+ AmixrMigrationTaskStatus.objects.get(task_id=current_task_id).update_status_to_finished()
+
+
+@shared_dedicated_queue_retry_task(
+ autoretry_for=(Exception,), retry_backoff=True, max_retries=0 if settings.DEBUG else None
+)
+def start_migration_user_data(api_token, organization_id, users):
+ logger.info(f"Start migration user data for organization {organization_id}")
+ for user in users:
+ user_data = users[user]
+ migrate_user_data_task_id = AmixrMigrationTaskStatus.objects.get_migration_task_id(
+ organization_id=organization_id, name=migrate_user_data.name
+ )
+ migrate_user_data.apply_async(
+ (api_token, organization_id, user_data),
+ task_id=migrate_user_data_task_id,
+ )
+
+ current_task_id = start_migration_user_data.request.id
+ AmixrMigrationTaskStatus.objects.get(task_id=current_task_id).update_status_to_finished()
+ logger.info(f"Finished 'start_migration_user_data' task for organization {organization_id}")
+
+
+@shared_dedicated_queue_retry_task(
+ autoretry_for=(Exception,), retry_backoff=True, max_retries=0 if settings.DEBUG else None
+)
+def migrate_user_data(api_token, organization_id, user_to_migrate):
+ logger.info(f"Start migration user {user_to_migrate['id']} for organization {organization_id}")
+ User = apps.get_model("user_management", "User")
+ UserNotificationPolicy = apps.get_model("base", "UserNotificationPolicy")
+ user = User.objects.filter(pk=user_to_migrate["id"], organization_id=organization_id).first()
+
+ if user:
+ if not user.verified_phone_number and user_to_migrate["old_verified_phone_number"]:
+ user.save_verified_phone_number(user_to_migrate["old_verified_phone_number"])
+
+ url = "personal_notification_rules?user_id={}".format(user_to_migrate["old_public_primary_key"])
+ user_notification_policies = get_data_with_respect_to_pagination(api_token, url)
+
+ notification_policies_to_create = []
+ existing_notification_policies_ids = list(user.notification_policies.all().values_list("pk", flat=True))
+
+ for notification_policy in user_notification_policies:
+
+ try:
+ step, notification_channel = PersonalNotificationRuleSerializer._type_to_step_and_notification_channel(
+ notification_policy["type"],
+ )
+ except exceptions.ValidationError:
+ continue
+
+ new_notification_policy = UserNotificationPolicy(
+ user=user,
+ important=notification_policy["important"],
+ step=step,
+ order=notification_policy["position"],
+ )
+ if step == UserNotificationPolicy.Step.NOTIFY:
+ new_notification_policy.notify_by = notification_channel
+
+ if step == UserNotificationPolicy.Step.WAIT:
+ duration = notification_policy.get("duration")
+ wait_delay = timezone.timedelta(seconds=duration) if duration else UserNotificationPolicy.FIVE_MINUTES
+ new_notification_policy.wait_delay = wait_delay
+
+ notification_policies_to_create.append(new_notification_policy)
+
+ UserNotificationPolicy.objects.bulk_create(notification_policies_to_create, batch_size=5000)
+ user.notification_policies.filter(pk__in=existing_notification_policies_ids).delete()
+
+ current_task_id = migrate_user_data.request.id
+ AmixrMigrationTaskStatus.objects.get(task_id=current_task_id).update_status_to_finished()
+ logger.info(f"Finished migration user {user_to_migrate['id']} for organization {organization_id}")
diff --git a/engine/apps/migration_tool/urls.py b/engine/apps/migration_tool/urls.py
new file mode 100644
index 0000000000..8aa874d1ae
--- /dev/null
+++ b/engine/apps/migration_tool/urls.py
@@ -0,0 +1,12 @@
+from common.api_helpers.optional_slash_router import optional_slash_path
+
+from .views.customers_migration_tool import MigrateAPIView, MigrationPlanAPIView, MigrationStatusAPIView
+
+app_name = "migration-tool"
+
+
+urlpatterns = [
+ optional_slash_path("amixr_migration_plan", MigrationPlanAPIView.as_view(), name="amixr_migration_plan"),
+ optional_slash_path("migrate_from_amixr", MigrateAPIView.as_view(), name="migrate_from_amixr"),
+ optional_slash_path("amixr_migration_status", MigrationStatusAPIView.as_view(), name="amixr_migration_status"),
+]
diff --git a/engine/apps/migration_tool/utils.py b/engine/apps/migration_tool/utils.py
new file mode 100644
index 0000000000..0faa4efeb7
--- /dev/null
+++ b/engine/apps/migration_tool/utils.py
@@ -0,0 +1,35 @@
+import requests
+from django.utils import timezone
+
+from apps.migration_tool.constants import REQUEST_URL
+
+
+class APIResponseException(Exception):
+ pass
+
+
+def get_data_with_respect_to_pagination(api_token, endpoint):
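+ """Fetch every page of a paginated Amixr API endpoint and return the combined "results" list."""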
+ def fetch(url):
+ response = requests.get(url, headers={"AUTHORIZATION": api_token})
+ if response.status_code != 200:
+ raise APIResponseException(f"Status code: {response.status_code}, Data: {response.content}")
+ return response.json()
+
+ data = fetch(f"{REQUEST_URL}/{endpoint}")
+ results = data["results"]
+
+ while data["next"]:
+ data = fetch(data["next"])
+
+ new_results = data["results"]
+ results.extend(new_results)
+
+ return results
+
+
+def convert_string_to_datetime(dt_str):
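+    # The old API emits timestamps either with microseconds and a UTC offset
+    # (e.g. "2020-05-19T12:37:01.430444Z") or without fractional seconds
+    # ("2020-05-19T12:37:01Z"); try the richer format first.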
+ try:
+ dt = timezone.datetime.strptime(dt_str, "%Y-%m-%dT%X.%f%z")
+ except ValueError:
+ dt = timezone.datetime.strptime(dt_str, "%Y-%m-%dT%XZ")
+ return dt
diff --git a/engine/apps/migration_tool/views/__init__.py b/engine/apps/migration_tool/views/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/engine/apps/migration_tool/views/customers_migration_tool.py b/engine/apps/migration_tool/views/customers_migration_tool.py
new file mode 100644
index 0000000000..d34e18c04c
--- /dev/null
+++ b/engine/apps/migration_tool/views/customers_migration_tool.py
@@ -0,0 +1,186 @@
+import logging
+
+import requests
+from rest_framework import status
+from rest_framework.permissions import IsAuthenticated
+from rest_framework.response import Response
+from rest_framework.views import APIView
+
+from apps.alerts.models import AlertReceiveChannel
+from apps.api.permissions import IsAdmin, MethodPermission
+from apps.auth_token.auth import PluginAuthentication
+from apps.migration_tool.constants import FINISHED, IN_PROGRESS, NOT_STARTED, REQUEST_URL
+from apps.migration_tool.tasks import start_migration_from_old_amixr
+from apps.migration_tool.utils import get_data_with_respect_to_pagination
+from common.api_helpers.exceptions import BadRequest
+
+logger = logging.getLogger(__name__)
+
+
+class MigrationPlanAPIView(APIView):
+ authentication_classes = (PluginAuthentication,)
+ permission_classes = (IsAuthenticated, MethodPermission)
+
+ method_permissions = {IsAdmin: ("POST",)}
+
+ def post(self, request):
+ api_token = request.data.get("token", None)
+ if api_token is None:
+ raise BadRequest(detail="API token is required")
+
+ organization = request.auth.organization
+ if organization.is_amixr_migration_started:
+ raise BadRequest(detail="Migration from Amixr has already been started")
+
+ # check token
+ response = requests.get(f"{REQUEST_URL}/users", headers={"AUTHORIZATION": api_token})
+ if response.status_code == status.HTTP_403_FORBIDDEN:
+ raise BadRequest(detail="Invalid token")
+
+        # These keys double as user-facing text so the existing frontend can render the plan as-is.
+ USERS_NOT_TO_MIGRATE_KEY = (
+ "Users WON'T be migrated (couldn't find those users in the Grafana Cloud, ask "
+ "them to sign up if you want their data to be migrated and re-build the migration plan)"
+ )
+
+ USERS_TO_MIGRATE = "Users will be migrated"
+ INTEGRATIONS_TO_MIGRATE = "Integrations to migrate"
+ INTEGRATIONS_COUNT = "Integrations count"
+ ROUTES_COUNT = "Routes count"
+ ESCALATIONS_POLICIES_COUNT = "Escalation policies count"
+ CALENDARS_COUNT = "Calendars count"
+
+ migration_plan = {
+ USERS_TO_MIGRATE: [],
+ USERS_NOT_TO_MIGRATE_KEY: [],
+ INTEGRATIONS_TO_MIGRATE: [],
+ INTEGRATIONS_COUNT: 0,
+ ROUTES_COUNT: 0,
+ ESCALATIONS_POLICIES_COUNT: 0,
+ CALENDARS_COUNT: 0,
+ }
+ logger.info(f"migration plan for organization {organization.pk}: get users")
+ users = get_data_with_respect_to_pagination(api_token, "users")
+ logger.info(f"migration plan for organization {organization.pk}: got users")
+ org_users = organization.users.values_list("email", flat=True)
+ for user in users:
+ if user["email"] in org_users:
+ migration_plan[USERS_TO_MIGRATE].append(user["email"])
+ else:
+ migration_plan[USERS_NOT_TO_MIGRATE_KEY].append(user["email"])
+
+ logger.info(f"migration plan for organization {organization.pk}: get integrations")
+ integrations = get_data_with_respect_to_pagination(api_token, "integrations")
+ logger.info(f"migration plan for organization {organization.pk}: got integrations")
+ existing_integrations_names = set(organization.alert_receive_channels.values_list("verbal_name", flat=True))
+
+ integrations_to_migrate_public_pk = []
+
+ for integration in integrations:
+ if integration["name"] in existing_integrations_names:
+ continue
+
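+            # Map the public "type" slug back to its internal integration key; skip integration types this installation doesn't recognize.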
+ try:
+ integration_type = [
+ key
+ for key, value in AlertReceiveChannel.INTEGRATIONS_TO_REVERSE_URL_MAP.items()
+ if value == integration["type"]
+ ][0]
+ except IndexError:
+ continue
+ if integration_type not in AlertReceiveChannel.WEB_INTEGRATION_CHOICES:
+ continue
+
+ migration_plan[INTEGRATIONS_TO_MIGRATE].append(integration["name"])
+ integrations_to_migrate_public_pk.append(integration["id"])
+
+ migration_plan[INTEGRATIONS_COUNT] = len(migration_plan[INTEGRATIONS_TO_MIGRATE])
+
+ routes_to_migrate_public_pk = []
+ logger.info(f"migration plan for organization {organization.pk}: get routes")
+ routes = get_data_with_respect_to_pagination(api_token, "routes")
+ logger.info(f"migration plan for organization {organization.pk}: got routes")
+
+ for route in routes:
+ if route["integration_id"] in integrations_to_migrate_public_pk:
+ migration_plan[ROUTES_COUNT] += 1
+ routes_to_migrate_public_pk.append(route["id"])
+
+ logger.info(f"migration plan for organization {organization.pk}: get escalation_policies")
+ escalation_policies = get_data_with_respect_to_pagination(api_token, "escalation_policies")
+ logger.info(f"migration plan for organization {organization.pk}: got escalation_policies")
+
+ for escalation_policy in escalation_policies:
+ if escalation_policy["route_id"] in routes_to_migrate_public_pk:
+ migration_plan[ESCALATIONS_POLICIES_COUNT] += 1
+
+ logger.info(f"migration plan for organization {organization.pk}: get schedules")
+ schedules = get_data_with_respect_to_pagination(api_token, "schedules")
+ logger.info(f"migration plan for organization {organization.pk}: got schedules")
+
+ existing_schedules_names = set(organization.oncall_schedules.values_list("name", flat=True))
+ for schedule in schedules:
+ if not schedule["ical_url"] or schedule["name"] in existing_schedules_names:
+ continue
+ migration_plan[CALENDARS_COUNT] += 1
+
+ return Response(migration_plan)
+
+
+class MigrateAPIView(APIView):
+ authentication_classes = (PluginAuthentication,)
+ permission_classes = (IsAuthenticated, IsAdmin)
+
+ def post(self, request):
+ api_token = request.data.get("token", None)
+
+ if api_token is None:
+ raise BadRequest(detail="API token is required")
+
+ organization = request.auth.organization
+ if organization.is_amixr_migration_started:
+ raise BadRequest(detail="Migration from Amixr has already been started")
+ # check token
+ response = requests.get(f"{REQUEST_URL}/users", headers={"AUTHORIZATION": api_token})
+ if response.status_code == status.HTTP_403_FORBIDDEN:
+ raise BadRequest(detail="Invalid token")
+
+ organization.is_amixr_migration_started = True
+ organization.save(update_fields=["is_amixr_migration_started"])
+
+ organization_id = organization.pk
+ user_id = request.user.pk
+ # start migration process
+ start_migration_from_old_amixr.delay(api_token=api_token, organization_id=organization_id, user_id=user_id)
+ return Response(status=status.HTTP_200_OK)
+
+
+class MigrationStatusAPIView(APIView):
+ authentication_classes = (PluginAuthentication,)
+ permission_classes = (IsAuthenticated, IsAdmin)
+
+ def get(self, request):
+ organization = request.auth.organization
+ migration_status = self.get_migration_status(organization)
+ endpoints_list = self.get_endpoints_list(organization)
+ return Response(
+ {"migration_status": migration_status, "endpoints_list": endpoints_list}, status=status.HTTP_200_OK
+ )
+
+ def get_migration_status(self, organization):
+ migration_status = NOT_STARTED
+ if organization.is_amixr_migration_started:
+ unfinished_tasks_exist = organization.migration_tasks.filter(is_finished=False).exists()
+ if unfinished_tasks_exist:
+ migration_status = IN_PROGRESS
+ else:
+ migration_status = FINISHED
+ return migration_status
+
+ def get_endpoints_list(self, organization):
+ integrations = organization.alert_receive_channels.filter(team_id__isnull=True)
+ endpoints_list = []
+ for integration in integrations:
+ integration_endpoint = f"{integration.verbal_name}, new endpoint: {integration.integration_url}"
+ endpoints_list.append(integration_endpoint)
+ return endpoints_list
diff --git a/engine/apps/oss_installation/__init__.py b/engine/apps/oss_installation/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/engine/apps/oss_installation/migrations/0001_squashed_initial.py b/engine/apps/oss_installation/migrations/0001_squashed_initial.py
new file mode 100644
index 0000000000..dac55f47cb
--- /dev/null
+++ b/engine/apps/oss_installation/migrations/0001_squashed_initial.py
@@ -0,0 +1,33 @@
+# Generated by Django 3.2.5 on 2022-05-31 14:46
+
+from django.db import migrations, models
+import uuid
+
+
+class Migration(migrations.Migration):
+
+ initial = True
+
+ dependencies = [
+ ]
+
+ operations = [
+ migrations.CreateModel(
+ name='CloudHeartbeat',
+ fields=[
+ ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
+ ('integration_id', models.CharField(max_length=50)),
+ ('integration_url', models.URLField()),
+ ('success', models.BooleanField(default=False)),
+ ],
+ ),
+ migrations.CreateModel(
+ name='OssInstallation',
+ fields=[
+ ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
+ ('installation_id', models.UUIDField(default=uuid.uuid4, editable=False)),
+ ('created_at', models.DateTimeField(auto_now=True)),
+ ('report_sent_at', models.DateTimeField(default=None, null=True)),
+ ],
+ ),
+ ]
diff --git a/engine/apps/oss_installation/migrations/__init__.py b/engine/apps/oss_installation/migrations/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/engine/apps/oss_installation/models/__init__.py b/engine/apps/oss_installation/models/__init__.py
new file mode 100644
index 0000000000..53dea35e45
--- /dev/null
+++ b/engine/apps/oss_installation/models/__init__.py
@@ -0,0 +1,2 @@
+from .heartbeat import CloudHeartbeat # noqa: F401
+from .oss_installation import OssInstallation # noqa: F401
diff --git a/engine/apps/oss_installation/models/heartbeat.py b/engine/apps/oss_installation/models/heartbeat.py
new file mode 100644
index 0000000000..6ddf467a40
--- /dev/null
+++ b/engine/apps/oss_installation/models/heartbeat.py
@@ -0,0 +1,29 @@
+import logging
+
+from django.db import models
+
+from apps.base.utils import live_settings
+
+logger = logging.getLogger(__name__)
+
+
+class CloudHeartbeat(models.Model):
+ integration_id = models.CharField(max_length=50)
+ integration_url = models.URLField()
+ success = models.BooleanField(default=False)
+
+ @classmethod
+ def status(cls):
+ """
+        Return the status of the cloud heartbeat:
+        True if the last heartbeat succeeded,
+        False if it failed,
+        None if the heartbeat is disabled or has not been set up yet.
+ """
+ if live_settings.GRAFANA_CLOUD_ONCALL_HEARTBEAT_ENABLED:
+ cloud_heartbeat = cls.objects.first()
+ if cloud_heartbeat is None:
+ return None
+ return cloud_heartbeat.success
+ else:
+ return None
diff --git a/engine/apps/oss_installation/models/oss_installation.py b/engine/apps/oss_installation/models/oss_installation.py
new file mode 100644
index 0000000000..9e4dd3ddb0
--- /dev/null
+++ b/engine/apps/oss_installation/models/oss_installation.py
@@ -0,0 +1,9 @@
+import uuid
+
+from django.db import models
+
+
+class OssInstallation(models.Model):
+ installation_id = models.UUIDField(default=uuid.uuid4, editable=False)
+ created_at = models.DateTimeField(auto_now=True)
+ report_sent_at = models.DateTimeField(null=True, default=None)
diff --git a/engine/apps/oss_installation/tasks.py b/engine/apps/oss_installation/tasks.py
new file mode 100644
index 0000000000..2c11a54aa6
--- /dev/null
+++ b/engine/apps/oss_installation/tasks.py
@@ -0,0 +1,95 @@
+from urllib.parse import urljoin
+
+import requests
+from celery.utils.log import get_task_logger
+from django.conf import settings
+from django.utils import timezone
+from rest_framework import status
+
+from apps.base.utils import live_settings
+from apps.oss_installation.models import CloudHeartbeat, OssInstallation
+from apps.oss_installation.usage_stats import UsageStatsService
+from common.custom_celery_tasks import shared_dedicated_queue_retry_task
+
+logger = get_task_logger(__name__)
+
+
+@shared_dedicated_queue_retry_task()
+def send_usage_stats_report():
+ logger.info("Start send_usage_stats_report")
+ installation = OssInstallation.objects.get_or_create()[0]
+ enabled = live_settings.SEND_ANONYMOUS_USAGE_STATS
+ if enabled:
+ logger.info("send_usage_stats_report is enabled")
+ service = UsageStatsService()
+ service.send_usage_stats_report()
+ else:
+ logger.info("send_usage_stats_report is disabled")
+ installation.report_sent_at = timezone.now()
+ installation.save()
+ logger.info("Finish send_usage_stats_report")
+
+
+def _setup_heartbeat_integration():
+ """Setup Grafana Cloud OnCall heartbeat integration."""
+ cloud_heartbeat = None
+ api_token = live_settings.GRAFANA_CLOUD_ONCALL_TOKEN
+    # Don't specify a team in the data so the heartbeat integration is created in the General team.
+ data = {"type": "formatted_webhook", "name": f"OnCall {settings.BASE_URL}"}
+ url = urljoin(settings.GRAFANA_CLOUD_ONCALL_API_URL, "/api/v1/integrations/")
+ try:
+ headers = {"Authorization": api_token}
+ r = requests.post(url=url, data=data, headers=headers, timeout=5)
+ if r.status_code == status.HTTP_201_CREATED:
+ response_data = r.json()
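+            # update_or_create() with no lookup kwargs keeps a single CloudHeartbeat row, overwriting it whenever the integration is re-created.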
+ cloud_heartbeat, _ = CloudHeartbeat.objects.update_or_create(
+ defaults={"integration_id": response_data["id"], "integration_url": response_data["heartbeat"]["link"]}
+ )
+ except requests.Timeout:
+ logger.warning("Unable to create cloud heartbeat integration. Request timeout.")
+ except requests.exceptions.RequestException as e:
+ logger.warning(f"Unable to create cloud heartbeat integration. Request exception {str(e)}.")
+ return cloud_heartbeat
+
+
+@shared_dedicated_queue_retry_task()
+def send_cloud_heartbeat():
+ """Send heartbeat to Grafana Cloud OnCall integration."""
+ if not live_settings.GRAFANA_CLOUD_ONCALL_HEARTBEAT_ENABLED or not live_settings.GRAFANA_CLOUD_ONCALL_TOKEN:
+ logger.info(
+ "Unable to send cloud heartbeat. Check values for GRAFANA_CLOUD_ONCALL_HEARTBEAT_ENABLED and GRAFANA_CLOUD_ONCALL_TOKEN."
+ )
+ return
+
+ logger.info("Start send cloud heartbeat")
+ try:
+ cloud_heartbeat = CloudHeartbeat.objects.get()
+ except CloudHeartbeat.DoesNotExist:
+ cloud_heartbeat = _setup_heartbeat_integration()
+
+ if cloud_heartbeat is None:
+ logger.warning("Unable to setup cloud heartbeat integration.")
+ return
+ cloud_heartbeat.success = False
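+    # Assume failure; flip to True only if the heartbeat endpoint answers 200 below.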
+ try:
+ response = requests.get(cloud_heartbeat.integration_url, timeout=5)
+ logger.info(f"Send cloud heartbeat with response {response.status_code}")
+ except requests.Timeout:
+ logger.warning("Unable to send cloud heartbeat. Request timeout.")
+ except requests.exceptions.RequestException as e:
+ logger.warning(f"Unable to send cloud heartbeat. Request exception {str(e)}.")
+ else:
+ if response.status_code == status.HTTP_200_OK:
+ cloud_heartbeat.success = True
+ logger.info("Successfully send cloud heartbeat")
+ elif response.status_code == status.HTTP_403_FORBIDDEN:
+ # check for 403 because AlertChannelDefiningMixin returns 403 if no integration was found.
+ logger.info("Failed to send cloud heartbeat. Integration was not created yet")
+ # force re-creation on next run
+ cloud_heartbeat.delete()
+ else:
+ logger.info(f"Failed to send cloud heartbeat. response {response.status_code}")
+ # save result of cloud heartbeat if it wasn't deleted
+ if cloud_heartbeat.pk is not None:
+ cloud_heartbeat.save()
+ logger.info("Finish send cloud heartbeat")
diff --git a/engine/apps/oss_installation/urls.py b/engine/apps/oss_installation/urls.py
new file mode 100644
index 0000000000..956ffe7476
--- /dev/null
+++ b/engine/apps/oss_installation/urls.py
@@ -0,0 +1,7 @@
+from common.api_helpers.optional_slash_router import optional_slash_path
+
+from .views import CloudHeartbeatStatusView
+
+urlpatterns = [
+ optional_slash_path("cloud_heartbeat_status", CloudHeartbeatStatusView.as_view(), name="cloud_heartbeat_status"),
+]
diff --git a/engine/apps/oss_installation/usage_stats.py b/engine/apps/oss_installation/usage_stats.py
new file mode 100644
index 0000000000..db90cce8b3
--- /dev/null
+++ b/engine/apps/oss_installation/usage_stats.py
@@ -0,0 +1,50 @@
+import logging
+import platform
+from dataclasses import asdict, dataclass
+
+import requests
+from django.conf import settings
+from django.db.models import Sum
+
+from apps.alerts.models import AlertGroupCounter
+from apps.oss_installation.models import OssInstallation
+from apps.oss_installation.utils import active_oss_users_count
+
+USAGE_STATS_URL = "https://stats.grafana.org/oncall-usage-report"
+USAGE_STATS_HTTP_TIMEOUT = 500
+
+logger = logging.getLogger(__name__)
+
+
+@dataclass
+class UsageStatsReport:
+ version: str
+ os: str
+ arch: str
+ usage_stats_id: str
+ metrics: dict
+
+
+class UsageStatsService:
+ def get_usage_stats_report(self):
+ metrics = {}
+ metrics["active_users_count"] = active_oss_users_count()
+        # aggregate() returns {"value__sum": None} when the table is empty, so the dict default never applies; coalesce explicitly
+        total_alert_groups = AlertGroupCounter.objects.aggregate(Sum("value")).get("value__sum") or 0
+ metrics["alert_groups_count"] = total_alert_groups
+
+ usage_stats_id = OssInstallation.objects.get_or_create()[0].installation_id
+
+ return UsageStatsReport(
+ usage_stats_id=str(usage_stats_id),
+ os=platform.system(),
+ arch=platform.machine(),
+ version=settings.VERSION,
+ metrics=metrics,
+ )
+
+ def send_usage_stats_report(self):
+ report = self.get_usage_stats_report()
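+        # Illustrative payload shape (values are examples only):
+        #   {"version": "...", "os": "Linux", "arch": "x86_64",
+        #    "usage_stats_id": "<uuid4>", "metrics": {"active_users_count": 3, "alert_groups_count": 42}}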
+ try:
+ requests.post(url=USAGE_STATS_URL, json=asdict(report), timeout=USAGE_STATS_HTTP_TIMEOUT)
+ except requests.exceptions.RequestException as e:
+            logger.info(f"Failed to send_usage_stats_report. msg={str(e)}")
diff --git a/engine/apps/oss_installation/utils.py b/engine/apps/oss_installation/utils.py
new file mode 100644
index 0000000000..fcfb537c79
--- /dev/null
+++ b/engine/apps/oss_installation/utils.py
@@ -0,0 +1,70 @@
+from contextlib import suppress
+
+from django.utils import timezone
+
+from apps.alerts.models import AlertGroupLogRecord, EscalationPolicy
+from apps.base.models import UserNotificationPolicyLogRecord
+from apps.public_api.constants import DEMO_USER_ID
+from apps.schedules.ical_utils import list_users_to_notify_from_ical_for_period
+from apps.schedules.models import OnCallSchedule
+from apps.user_management.models import User
+
+
+def active_oss_users_count():
+ """
+    Return the number of active users in this OSS installation.
+ """
+
+ # Take logs for previous 24 hours
+ start = timezone.now() - timezone.timedelta(hours=24)
+ end = timezone.now()
+
+ # Take schedules for current month
+ schedule_start = timezone.now().replace(day=1, hour=0, minute=0, second=0, microsecond=0)
+ schedule_end = (schedule_start + timezone.timedelta(days=32)).replace(day=1)
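+    # Adding 32 days always lands in the next month; replace(day=1) then snaps to its first day.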
+
+ unique_active_users = set()
+
+ unique_active_users.update(
+ list(
+ UserNotificationPolicyLogRecord.objects.filter(
+ created_at__gte=start, created_at__lt=end, author__isnull=False
+ )
+ .values_list("author_id", flat=True)
+ .distinct()
+ )
+ )
+
+ unique_active_users.update(
+ list(
+ AlertGroupLogRecord.objects.filter(
+ type__in=AlertGroupLogRecord.TYPES_FOR_LICENCE_CALCULATION,
+ created_at__gte=start,
+ created_at__lt=end,
+ author__isnull=False,
+ )
+ .values_list("author_id", flat=True)
+ .distinct()
+ )
+ )
+
+    # Get users referenced by escalation policies
+ unique_active_users.update(
+ list(
+ EscalationPolicy.objects.filter(notify_to_users_queue__isnull=False).values_list(
+ "notify_to_users_queue__id", flat=True
+ )
+ )
+ )
+
+ for schedule in OnCallSchedule.objects.all():
+ users_from_schedule = list_users_to_notify_from_ical_for_period(schedule, schedule_start, schedule_end)
+ for user in users_from_schedule:
+ unique_active_users.add(user.pk)
+
+ # Remove demo user from active users
+ with suppress(User.DoesNotExist):
+ demo_user = User.objects.get(public_primary_key=DEMO_USER_ID)
+ with suppress(KeyError):
+ unique_active_users.remove(demo_user.pk)
+ return len(unique_active_users)
diff --git a/engine/apps/oss_installation/views/__init__.py b/engine/apps/oss_installation/views/__init__.py
new file mode 100644
index 0000000000..0716482b9e
--- /dev/null
+++ b/engine/apps/oss_installation/views/__init__.py
@@ -0,0 +1 @@
+from .cloud_heartbeat_status import CloudHeartbeatStatusView # noqa: F401
diff --git a/engine/apps/oss_installation/views/cloud_heartbeat_status.py b/engine/apps/oss_installation/views/cloud_heartbeat_status.py
new file mode 100644
index 0000000000..be553641e4
--- /dev/null
+++ b/engine/apps/oss_installation/views/cloud_heartbeat_status.py
@@ -0,0 +1,15 @@
+from rest_framework.permissions import IsAuthenticated
+from rest_framework.response import Response
+from rest_framework.views import APIView
+
+from apps.auth_token.auth import PluginAuthentication
+from apps.oss_installation.models import CloudHeartbeat
+
+
+class CloudHeartbeatStatusView(APIView):
+ authentication_classes = (PluginAuthentication,)
+ permission_classes = (IsAuthenticated,)
+
+ def get(self, request):
+ response = {"status": CloudHeartbeat.status()}
+ return Response(response)
diff --git a/engine/apps/public_api/__init__.py b/engine/apps/public_api/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/engine/apps/public_api/constants.py b/engine/apps/public_api/constants.py
new file mode 100644
index 0000000000..4a14df3faf
--- /dev/null
+++ b/engine/apps/public_api/constants.py
@@ -0,0 +1,69 @@
+from django.utils import dateparse
+
+DEMO_USER_ID = "U4DNY931HHJS5"
+DEMO_ORGANIZATION_ID = "TCNPY4A1BWUMP"
+DEMO_SLACK_USER_ID = "UALEXSLACKDJPK"
+DEMO_SLACK_TEAM_ID = "TALEXSLACKDJPK"
+DEMO_AUTH_TOKEN = "meowmeowmeow"
+DEMO_USER_USERNAME = "Alex"
+DEMO_USER_EMAIL = "public-api-demo-user-1@amixr.io"
+DEMO_INTEGRATION_ID = "CFRPV98RPR1U8"
+DEMO_INTEGRATION_LINK_TOKEN = "mReAoNwDm0eMwKo1mTeTwYo"
+DEMO_INTEGRATION_NAME = "Grafana :blush:"
+DEMO_ROUTE_ID_1 = "RIYGUJXCPFHXY"
+DEMO_ROUTE_ID_2 = "RVBE4RKQSCGJ2"
+DEMO_SLACK_CHANNEL_FOR_ROUTE_ID = "CH23212D"
+DEMO_ESCALATION_CHAIN_ID = "F5JU6KJET33FE"
+DEMO_ESCALATION_POLICY_ID_1 = "E3GA6SJETWWJS"
+DEMO_ESCALATION_POLICY_ID_2 = "E5JJTU52M5YM4"
+DEMO_SCHEDULE_ID_ICAL = "SBM7DV7BKFUYU"
+DEMO_SCHEDULE_ID_CALENDAR = "S3Z477AHDXTMF"
+DEMO_SCHEDULE_NAME_ICAL = "Demo schedule iCal"
+DEMO_SCHEDULE_NAME_CALENDAR = "Demo schedule Calendar"
+DEMO_SCHEDULE_ICAL_URL_PRIMARY = "https://example.com/meow_calendar.ics"
+DEMO_SCHEDULE_ICAL_URL_OVERRIDES = "https://example.com/meow_calendar_overrides.ics"
+DEMO_INCIDENT_ID = "I68T24C13IFW1"
+DEMO_INCIDENT_CREATED_AT = "2020-05-19T12:37:01.430444Z"
+DEMO_INCIDENT_RESOLVED_AT = "2020-05-19T13:37:01.429805Z"
+DEMO_ALERT_IDS = [
+ ("AA74DN7T4JQB6", "2020-05-11T20:07:43Z"),
+ ("AR9SSYFKE2PV7", "2020-05-11T20:07:54Z"),
+ ("AWJQSGEYYUFGH", "2020-05-11T20:07:58Z"),
+]
+DEMO_ALERT_PAYLOAD = {
+ "evalMatches": [
+ {"value": 100, "metric": "High value", "tags": None},
+ {"value": 200, "metric": "Higher Value", "tags": None},
+ ],
+ "message": "Someone is testing the alert notification within grafana.",
+ "ruleId": 0,
+ "ruleName": "Test notification",
+ "ruleUrl": "https://amixr.io/",
+ "state": "alerting",
+ "title": "[Alerting] Test notification",
+}
+VALID_DATE_FOR_DELETE_INCIDENT = dateparse.parse_date("2020-07-04")
+DEMO_SLACK_CHANNEL_NAME = "meow_channel"
+DEMO_SLACK_CHANNEL_SLACK_ID = "MEOW_SLACK_ID"
+DEMO_PERSONAL_NOTIFICATION_ID_1 = "NT79GA9I7E4DJ"
+DEMO_PERSONAL_NOTIFICATION_ID_2 = "ND9EHN5LN1DUU"
+DEMO_PERSONAL_NOTIFICATION_ID_3 = "NEF49YQ1HNPDD"
+DEMO_PERSONAL_NOTIFICATION_ID_4 = "NWAL6WFJNWDD8"
+DEMO_RESOLUTION_NOTE_ID = "M4BTQUS3PRHYQ"
+DEMO_RESOLUTION_NOTE_TEXT = "Demo resolution note"
+DEMO_RESOLUTION_NOTE_CREATED_AT = "2020-06-19T12:40:01.429805Z"
+DEMO_RESOLUTION_NOTE_SOURCE = "web"
+DEMO_CUSTOM_ACTION_ID = "KGEFG74LU1D8L"
+DEMO_CUSTOM_ACTION_NAME = "Publish Incident To Jira"
+DEMO_SLACK_USER_GROUP_ID = "GPFAPH7J7BKJB"
+DEMO_SLACK_USER_GROUP_SLACK_ID = "MEOW_SLACK_ID"
+DEMO_SLACK_USER_GROUP_NAME = "Meow Group"
+DEMO_SLACK_USER_GROUP_HANDLE = "meow_group"
+DEMO_ON_CALL_SHIFT_ID_1 = "OH3V5FYQEYJ6M"
+DEMO_ON_CALL_SHIFT_ID_2 = "O9WTH7CKM3KZW"
+DEMO_ON_CALL_SHIFT_NAME_1 = "Demo single event"
+DEMO_ON_CALL_SHIFT_NAME_2 = "Demo recurrent event"
+DEMO_ON_CALL_SHIFT_START_1 = "2020-09-10T08:00:00"
+DEMO_ON_CALL_SHIFT_START_2 = "2020-09-10T16:00:00"
+DEMO_ON_CALL_SHIFT_DURATION = 10800
+DEMO_ON_CALL_SHIFT_BY_DAY = ["MO", "WE", "FR"]
diff --git a/engine/apps/public_api/custom_renderers.py b/engine/apps/public_api/custom_renderers.py
new file mode 100644
index 0000000000..f68efccb46
--- /dev/null
+++ b/engine/apps/public_api/custom_renderers.py
@@ -0,0 +1,19 @@
+import json
+
+from rest_framework.renderers import BaseRenderer
+
+
+class CalendarRenderer(BaseRenderer):
+ """
+    A basic custom renderer that outputs calendar data as plain text, avoiding
+    JSON escape characters in feed responses.
+ """
+
+ media_type = "text/calendar"
+ format = "txt"
+
+ def render(self, data, accepted_media_type=None, renderer_context=None):
+ if isinstance(data, bytes):
+ return data
+        error_response = json.dumps(data)
+        return error_response.encode("utf-8")
diff --git a/engine/apps/public_api/helpers.py b/engine/apps/public_api/helpers.py
new file mode 100644
index 0000000000..f684e34ab1
--- /dev/null
+++ b/engine/apps/public_api/helpers.py
@@ -0,0 +1,25 @@
+from apps.public_api.constants import DEMO_AUTH_TOKEN, VALID_DATE_FOR_DELETE_INCIDENT
+from apps.slack.slack_client import SlackClientWithErrorHandling
+from apps.slack.slack_client.exceptions import SlackAPITokenException
+
+
+def is_demo_token_request(request):
+    return request.headers.get("Authorization") == DEMO_AUTH_TOKEN
+
+
+def team_has_slack_token_for_deleting(alert_group):
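+    # auth.test is a cheap call to verify the bot token is still valid before attempting Slack-side cleanup.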
+ if alert_group.slack_message and alert_group.slack_message.slack_team_identity:
+ sc = SlackClientWithErrorHandling(alert_group.slack_message.slack_team_identity.bot_access_token)
+ try:
+ sc.api_call(
+ "auth.test",
+ )
+ except SlackAPITokenException:
+ return False
+ return True
+
+
+def is_valid_group_creation_date(alert_group):
+ return alert_group.started_at.date() > VALID_DATE_FOR_DELETE_INCIDENT
diff --git a/engine/apps/public_api/serializers/__init__.py b/engine/apps/public_api/serializers/__init__.py
new file mode 100644
index 0000000000..d01a7f2e35
--- /dev/null
+++ b/engine/apps/public_api/serializers/__init__.py
@@ -0,0 +1,15 @@
+from .alerts import AlertSerializer # noqa: F401
+from .escalation_chains import EscalationChainSerializer # noqa: F401
+from .escalation_policies import EscalationPolicySerializer, EscalationPolicyUpdateSerializer # noqa: F401
+from .incidents import IncidentSerializer # noqa: F401
+from .integrations import IntegrationSerializer, IntegrationUpdateSerializer # noqa: F401
+from .maintenance import MaintainableObjectSerializerMixin # noqa: F401
+from .on_call_shifts import CustomOnCallShiftSerializer, CustomOnCallShiftUpdateSerializer # noqa: F401
+from .organizations import OrganizationSerializer # noqa: F401
+from .personal_notification_rules import ( # noqa: F401
+ PersonalNotificationRuleSerializer,
+ PersonalNotificationRuleUpdateSerializer,
+)
+from .routes import ChannelFilterSerializer, ChannelFilterUpdateSerializer # noqa: F401
+from .schedules_polymorphic import PolymorphicScheduleSerializer, PolymorphicScheduleUpdateSerializer # noqa: F401
+from .users import FastUserSerializer, UserSerializer # noqa: F401
diff --git a/engine/apps/public_api/serializers/action.py b/engine/apps/public_api/serializers/action.py
new file mode 100644
index 0000000000..db202b22c4
--- /dev/null
+++ b/engine/apps/public_api/serializers/action.py
@@ -0,0 +1,17 @@
+from rest_framework import serializers
+
+from apps.alerts.models import CustomButton
+from common.api_helpers.custom_fields import TeamPrimaryKeyRelatedField
+
+
+class ActionSerializer(serializers.ModelSerializer):
+ id = serializers.CharField(read_only=True, source="public_primary_key")
+ team_id = TeamPrimaryKeyRelatedField(allow_null=True, source="team")
+
+ class Meta:
+ model = CustomButton
+ fields = [
+ "id",
+ "name",
+ "team_id",
+ ]
diff --git a/engine/apps/public_api/serializers/alerts.py b/engine/apps/public_api/serializers/alerts.py
new file mode 100644
index 0000000000..3d725cc8d0
--- /dev/null
+++ b/engine/apps/public_api/serializers/alerts.py
@@ -0,0 +1,21 @@
+from rest_framework import serializers
+
+from apps.alerts.models import Alert
+from common.api_helpers.mixins import EagerLoadingMixin
+
+
+class AlertSerializer(EagerLoadingMixin, serializers.ModelSerializer):
+ id = serializers.CharField(read_only=True, source="public_primary_key")
+ alert_group_id = serializers.CharField(read_only=True, source="group.public_primary_key")
+ payload = serializers.JSONField(read_only=True, source="raw_request_data")
+
+ SELECT_RELATED = ["group"]
+
+ class Meta:
+ model = Alert
+ fields = [
+ "id",
+ "alert_group_id",
+ "created_at",
+ "payload",
+ ]
diff --git a/engine/apps/public_api/serializers/escalation_chains.py b/engine/apps/public_api/serializers/escalation_chains.py
new file mode 100644
index 0000000000..13b7fa75ac
--- /dev/null
+++ b/engine/apps/public_api/serializers/escalation_chains.py
@@ -0,0 +1,24 @@
+from rest_framework import serializers
+
+from apps.alerts.models import EscalationChain
+from common.api_helpers.custom_fields import TeamPrimaryKeyRelatedField
+from common.api_helpers.utils import CurrentOrganizationDefault
+
+
+class EscalationChainSerializer(serializers.ModelSerializer):
+ id = serializers.ReadOnlyField(source="public_primary_key")
+ organization = serializers.HiddenField(default=CurrentOrganizationDefault())
+ team_id = TeamPrimaryKeyRelatedField(required=False, allow_null=True, source="team")
+
+ class Meta:
+ model = EscalationChain
+ fields = (
+ "id",
+ "name",
+ "organization",
+ "team_id",
+ )
+
+
+class EscalationChainUpdateSerializer(EscalationChainSerializer):
+ team_id = TeamPrimaryKeyRelatedField(source="team", read_only=True)
diff --git a/engine/apps/public_api/serializers/escalation_policies.py b/engine/apps/public_api/serializers/escalation_policies.py
new file mode 100644
index 0000000000..e7903caea4
--- /dev/null
+++ b/engine/apps/public_api/serializers/escalation_policies.py
@@ -0,0 +1,306 @@
+import time
+from datetime import timedelta
+
+from django.utils.functional import cached_property
+from rest_framework import fields, serializers
+
+from apps.alerts.models import CustomButton, EscalationChain, EscalationPolicy
+from apps.schedules.models import OnCallSchedule
+from apps.slack.models import SlackUserGroup
+from apps.user_management.models import User
+from common.api_helpers.custom_fields import (
+ CustomTimeField,
+ OrganizationFilteredPrimaryKeyRelatedField,
+ UsersFilteredByOrganizationField,
+)
+from common.api_helpers.exceptions import BadRequest
+from common.api_helpers.mixins import EagerLoadingMixin, OrderedModelSerializerMixin
+
+
+class EscalationPolicyTypeField(fields.CharField):
+ def to_representation(self, value):
+ return EscalationPolicy.PUBLIC_STEP_CHOICES_MAP[value]
+
+ def to_internal_value(self, data):
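+        # Reverse-map the public step name back to its internal constant, accepting only publicly exposed steps.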
+ try:
+ step_type = [
+ key
+ for key, value in EscalationPolicy.PUBLIC_STEP_CHOICES_MAP.items()
+ if value == data and key in EscalationPolicy.PUBLIC_STEP_CHOICES
+ ][0]
+ except IndexError:
+ raise BadRequest(detail="Invalid escalation step type")
+ return step_type
+
+
+class EscalationPolicySerializer(EagerLoadingMixin, OrderedModelSerializerMixin, serializers.ModelSerializer):
+ id = serializers.CharField(read_only=True, source="public_primary_key")
+ escalation_chain_id = OrganizationFilteredPrimaryKeyRelatedField(
+ queryset=EscalationChain.objects, source="escalation_chain"
+ )
+ position = serializers.IntegerField(required=False, source="order")
+ type = EscalationPolicyTypeField(source="step", allow_null=True)
+ duration = serializers.ChoiceField(required=False, source="wait_delay", choices=EscalationPolicy.DURATION_CHOICES)
+ persons_to_notify = UsersFilteredByOrganizationField(
+ queryset=User.objects,
+ required=False,
+ source="notify_to_users_queue",
+ )
+ persons_to_notify_next_each_time = UsersFilteredByOrganizationField(
+ queryset=User.objects,
+ required=False,
+ source="notify_to_users_queue",
+ )
+ notify_on_call_from_schedule = OrganizationFilteredPrimaryKeyRelatedField(
+ queryset=OnCallSchedule.objects, required=False, source="notify_schedule"
+ )
+ group_to_notify = OrganizationFilteredPrimaryKeyRelatedField(
+ queryset=SlackUserGroup.objects,
+ required=False,
+ source="notify_to_group",
+ filter_field="slack_team_identity__organizations",
+ )
+ action_to_trigger = OrganizationFilteredPrimaryKeyRelatedField(
+ queryset=CustomButton.objects,
+ required=False,
+ source="custom_button_trigger",
+ )
+ manual_order = serializers.BooleanField(default=False, write_only=True)
+ important = serializers.BooleanField(required=False)
+ notify_if_time_from = CustomTimeField(required=False, source="from_time")
+ notify_if_time_to = CustomTimeField(required=False, source="to_time")
+
+ class Meta:
+ model = EscalationPolicy
+ fields = [
+ "id",
+ "escalation_chain_id",
+ "position",
+ "type",
+ "duration",
+ "important",
+ "persons_to_notify",
+ "persons_to_notify_next_each_time",
+ "notify_on_call_from_schedule",
+ "group_to_notify",
+ "action_to_trigger",
+ "manual_order",
+ "notify_if_time_from",
+ "notify_if_time_to",
+ "num_alerts_in_window",
+ "num_minutes_in_window",
+ ]
+
+ PREFETCH_RELATED = ["notify_to_users_queue"]
+ SELECT_RELATED = ["escalation_chain"]
+
+ @cached_property
+ def escalation_chain(self):
+ if self.instance is not None:
+ escalation_chain = self.instance.escalation_chain
+ else:
+ escalation_chain = EscalationChain.objects.get(public_primary_key=self.initial_data["escalation_chain_id"])
+ return escalation_chain
+
+ def validate_type(self, step_type):
+ organization = self.context["request"].auth.organization
+
+ if step_type == EscalationPolicy.STEP_FINAL_NOTIFYALL and organization.slack_team_identity is None:
+ raise BadRequest(detail="Invalid escalation step type: step is Slack-specific")
+
+ return step_type
+
+ def validate_action_to_trigger(self, action_to_trigger):
+ if action_to_trigger.team != self.escalation_chain.team:
+ raise BadRequest(detail="Action must be assigned to the same team as the escalation chain")
+
+ return action_to_trigger
+
+ def validate_notify_on_call_from_schedule(self, schedule):
+ if schedule.team != self.escalation_chain.team:
+ raise BadRequest(detail="Schedule must be assigned to the same team as the escalation chain")
+
+ return schedule
+
+ def create(self, validated_data):
+ validated_data = self._correct_validated_data(validated_data)
+ manual_order = validated_data.pop("manual_order")
+ if not manual_order:
+ order = validated_data.pop("order", None)
+ escalation_chain_id = validated_data.get("escalation_chain")
+ # validate 'order' value before creation
+ self._validate_order(order, {"escalation_chain_id": escalation_chain_id})
+
+ instance = super().create(validated_data)
+ self._change_position(order, instance)
+ else:
+ instance = super().create(validated_data)
+
+ return instance
+
+ def to_representation(self, instance):
+ step = instance.step
+ result = super().to_representation(instance)
+ result = self._get_field_to_represent(step, result)
+ if "duration" in result and result["duration"] is not None:
+ result["duration"] = result["duration"].seconds
+ return result
+
+ def to_internal_value(self, data):
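+        # "duration" may arrive as an "H:MM:SS" string or an integer number of seconds; normalize seconds to the string form before validation.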
+ if data.get("duration", None):
+ try:
+ time.strptime(data["duration"], "%H:%M:%S")
+ except (ValueError, TypeError):
+ try:
+ data["duration"] = str(timedelta(seconds=data["duration"]))
+ except (ValueError, TypeError):
+ raise BadRequest(detail="Invalid duration format")
+ if data.get("persons_to_notify", []) is None: # terraform case
+ data["persons_to_notify"] = []
+ if data.get("persons_to_notify_next_each_time", []) is None: # terraform case
+ data["persons_to_notify_next_each_time"] = []
+ return super().to_internal_value(data)
+
+ def _get_field_to_represent(self, step, result):
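+        # Start from the full list of step-specific fields and whitelist back only those relevant to this step, so each policy type serializes a minimal payload.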
+ fields_to_remove = [
+ "duration",
+ "persons_to_notify",
+ "persons_to_notify_next_each_time",
+ "notify_on_call_from_schedule",
+ "group_to_notify",
+ "important",
+ "action_to_trigger",
+ "notify_if_time_from",
+ "notify_if_time_to",
+ "num_alerts_in_window",
+ "num_minutes_in_window",
+ ]
+ if step == EscalationPolicy.STEP_WAIT:
+ fields_to_remove.remove("duration")
+ elif step in [EscalationPolicy.STEP_NOTIFY_SCHEDULE, EscalationPolicy.STEP_NOTIFY_SCHEDULE_IMPORTANT]:
+ fields_to_remove.remove("notify_on_call_from_schedule")
+ elif step in [
+ EscalationPolicy.STEP_NOTIFY_MULTIPLE_USERS,
+ EscalationPolicy.STEP_NOTIFY_MULTIPLE_USERS_IMPORTANT,
+ ]:
+ fields_to_remove.remove("persons_to_notify")
+ elif step == EscalationPolicy.STEP_NOTIFY_USERS_QUEUE:
+ fields_to_remove.remove("persons_to_notify_next_each_time")
+ elif step in [EscalationPolicy.STEP_NOTIFY_GROUP, EscalationPolicy.STEP_NOTIFY_GROUP_IMPORTANT]:
+ fields_to_remove.remove("group_to_notify")
+ elif step == EscalationPolicy.STEP_TRIGGER_CUSTOM_BUTTON:
+ fields_to_remove.remove("action_to_trigger")
+ elif step == EscalationPolicy.STEP_NOTIFY_IF_TIME:
+ fields_to_remove.remove("notify_if_time_from")
+ fields_to_remove.remove("notify_if_time_to")
+ elif step == EscalationPolicy.STEP_NOTIFY_IF_NUM_ALERTS_IN_TIME_WINDOW:
+ fields_to_remove.remove("num_alerts_in_window")
+ fields_to_remove.remove("num_minutes_in_window")
+
+ if (
+ step in EscalationPolicy.DEFAULT_TO_IMPORTANT_STEP_MAPPING
+ or step in EscalationPolicy.DEFAULT_TO_IMPORTANT_STEP_MAPPING.values()
+ ):
+ fields_to_remove.remove("important")
+ result["important"] = step not in EscalationPolicy.DEFAULT_TO_IMPORTANT_STEP_MAPPING
+ for field in fields_to_remove:
+ result.pop(field, None)
+ return result
+
+ def _correct_validated_data(self, validated_data):
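+        # Drop every step-specific field that doesn't apply to this step, then fold the "important" flag into the matching step constant.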
+ validated_data_fields_to_remove = [
+ "notify_to_users_queue",
+ "wait_delay",
+ "notify_schedule",
+ "notify_to_group",
+ "custom_button_trigger",
+ "from_time",
+ "to_time",
+ "num_alerts_in_window",
+ "num_minutes_in_window",
+ ]
+ step = validated_data.get("step")
+ important = validated_data.pop("important", None)
+
+ if step in [EscalationPolicy.STEP_NOTIFY_SCHEDULE, EscalationPolicy.STEP_NOTIFY_SCHEDULE_IMPORTANT]:
+ validated_data_fields_to_remove.remove("notify_schedule")
+ elif step == EscalationPolicy.STEP_WAIT:
+ validated_data_fields_to_remove.remove("wait_delay")
+ elif step in [
+ EscalationPolicy.STEP_NOTIFY_USERS_QUEUE,
+ EscalationPolicy.STEP_NOTIFY_MULTIPLE_USERS,
+ EscalationPolicy.STEP_NOTIFY_MULTIPLE_USERS_IMPORTANT,
+ ]:
+ validated_data_fields_to_remove.remove("notify_to_users_queue")
+ elif step in [EscalationPolicy.STEP_NOTIFY_GROUP, EscalationPolicy.STEP_NOTIFY_GROUP_IMPORTANT]:
+ validated_data_fields_to_remove.remove("notify_to_group")
+ elif step == EscalationPolicy.STEP_TRIGGER_CUSTOM_BUTTON:
+ validated_data_fields_to_remove.remove("custom_button_trigger")
+ elif step == EscalationPolicy.STEP_NOTIFY_IF_TIME:
+ validated_data_fields_to_remove.remove("from_time")
+ validated_data_fields_to_remove.remove("to_time")
+ elif step == EscalationPolicy.STEP_NOTIFY_IF_NUM_ALERTS_IN_TIME_WINDOW:
+ validated_data_fields_to_remove.remove("num_alerts_in_window")
+ validated_data_fields_to_remove.remove("num_minutes_in_window")
+
+ for field in validated_data_fields_to_remove:
+ validated_data.pop(field, None)
+
+ if step in EscalationPolicy.DEFAULT_TO_IMPORTANT_STEP_MAPPING and important:
+ validated_data["step"] = EscalationPolicy.DEFAULT_TO_IMPORTANT_STEP_MAPPING[step]
+ elif step in EscalationPolicy.DEFAULT_TO_IMPORTANT_STEP_MAPPING.values() and important is False:
+ validated_data["step"] = [
+ key for key, value in EscalationPolicy.DEFAULT_TO_IMPORTANT_STEP_MAPPING.items() if value == step
+ ][0]
+ return validated_data
+
+
+class EscalationPolicyUpdateSerializer(EscalationPolicySerializer):
+ escalation_chain_id = OrganizationFilteredPrimaryKeyRelatedField(read_only=True, source="escalation_chain")
+ type = EscalationPolicyTypeField(required=False, source="step", allow_null=True)
+
+ class Meta(EscalationPolicySerializer.Meta):
+ read_only_fields = ("route_id",)
+
+ def update(self, instance, validated_data):
+ if "step" in validated_data:
+ step = validated_data["step"]
+ else:
+ step = instance.step
+
+ validated_data["step"] = step
+ validated_data = self._correct_validated_data(validated_data)
+
+ if step != instance.step:
+ if step is not None:
+ if step not in [EscalationPolicy.STEP_NOTIFY_SCHEDULE, EscalationPolicy.STEP_NOTIFY_SCHEDULE_IMPORTANT]:
+ instance.notify_schedule = None
+ if step != EscalationPolicy.STEP_WAIT:
+ instance.wait_delay = None
+ if step not in [
+ EscalationPolicy.STEP_NOTIFY_USERS_QUEUE,
+ EscalationPolicy.STEP_NOTIFY_MULTIPLE_USERS,
+ EscalationPolicy.STEP_NOTIFY_MULTIPLE_USERS_IMPORTANT,
+ ]:
+ instance.notify_to_users_queue.clear()
+ if step not in [EscalationPolicy.STEP_NOTIFY_GROUP, EscalationPolicy.STEP_NOTIFY_GROUP_IMPORTANT]:
+ instance.notify_to_group = None
+ if step != EscalationPolicy.STEP_TRIGGER_CUSTOM_BUTTON:
+ instance.custom_button_trigger = None
+ if step != EscalationPolicy.STEP_NOTIFY_IF_TIME:
+ instance.from_time = None
+ instance.to_time = None
+ if step != EscalationPolicy.STEP_NOTIFY_IF_NUM_ALERTS_IN_TIME_WINDOW:
+ instance.num_alerts_in_window = None
+ instance.num_minutes_in_window = None
+
+ manual_order = validated_data.pop("manual_order")
+
+ if not manual_order:
+ order = validated_data.pop("order", None)
+ self._validate_order(order, {"escalation_chain_id": instance.escalation_chain_id})
+ self._change_position(order, instance)
+
+ return super().update(instance, validated_data)
diff --git a/engine/apps/public_api/serializers/incidents.py b/engine/apps/public_api/serializers/incidents.py
new file mode 100644
index 0000000000..1d3bc174e2
--- /dev/null
+++ b/engine/apps/public_api/serializers/incidents.py
@@ -0,0 +1,47 @@
+from rest_framework import serializers
+
+from apps.alerts.models import AlertGroup
+from common.api_helpers.mixins import EagerLoadingMixin
+
+
+class IncidentSerializer(EagerLoadingMixin, serializers.ModelSerializer):
+
+ id = serializers.CharField(read_only=True, source="public_primary_key")
+ integration_id = serializers.CharField(source="channel.public_primary_key")
+ route_id = serializers.SerializerMethodField()
+ created_at = serializers.DateTimeField(source="started_at")
+ alerts_count = serializers.SerializerMethodField()
+ title = serializers.SerializerMethodField()
+ state = serializers.SerializerMethodField()
+
+ SELECT_RELATED = ["channel", "channel_filter"]
+ PREFETCH_RELATED = ["alerts"]
+
+ class Meta:
+ model = AlertGroup
+ fields = [
+ "id",
+ "integration_id",
+ "route_id",
+ "alerts_count",
+ "state",
+ "created_at",
+ "resolved_at",
+ "acknowledged_at",
+ "title",
+ ]
+
+ def get_alerts_count(self, obj):
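+        # len() over the prefetched queryset avoids an extra COUNT query (alerts are in PREFETCH_RELATED).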
+ return len(obj.alerts.all())
+
+ def get_title(self, obj):
+ return obj.alerts.all()[0].title
+
+ def get_state(self, obj):
+ return obj.state
+
+ def get_route_id(self, obj):
+ if obj.channel_filter is not None:
+ return obj.channel_filter.public_primary_key
+ else:
+ return None
diff --git a/engine/apps/public_api/serializers/integrations.py b/engine/apps/public_api/serializers/integrations.py
new file mode 100644
index 0000000000..82d418c04a
--- /dev/null
+++ b/engine/apps/public_api/serializers/integrations.py
@@ -0,0 +1,191 @@
+from django.core.exceptions import ObjectDoesNotExist
+from jinja2 import TemplateSyntaxError
+from rest_framework import fields, serializers
+
+from apps.alerts.grafana_alerting_sync_manager.grafana_alerting_sync import GrafanaAlertingSyncManager
+from apps.alerts.models import AlertReceiveChannel
+from apps.public_api.constants import DEMO_INTEGRATION_LINK_TOKEN
+from apps.public_api.helpers import is_demo_token_request
+from common.api_helpers.custom_fields import TeamPrimaryKeyRelatedField
+from common.api_helpers.exceptions import BadRequest
+from common.api_helpers.mixins import EagerLoadingMixin
+from common.jinja_templater import jinja_template_env
+from common.utils import timed_lru_cache
+
+from .integtration_heartbeat import IntegrationHeartBeatSerializer
+from .maintenance import MaintainableObjectSerializerMixin
+from .routes import DefaultChannelFilterSerializer
+
+
+class IntegrationTypeField(fields.CharField):
+ def to_representation(self, value):
+ return AlertReceiveChannel.INTEGRATIONS_TO_REVERSE_URL_MAP[value]
+
+ def to_internal_value(self, data):
+ try:
+ integration_type = [
+ key for key, value in AlertReceiveChannel.INTEGRATIONS_TO_REVERSE_URL_MAP.items() if value == data
+ ][0]
+ except IndexError:
+ raise BadRequest(detail="Invalid integration type")
+ return integration_type
+
+
+class IntegrationSerializer(EagerLoadingMixin, serializers.ModelSerializer, MaintainableObjectSerializerMixin):
+ id = serializers.CharField(read_only=True, source="public_primary_key")
+ name = serializers.CharField(required=False, source="verbal_name")
+ team_id = TeamPrimaryKeyRelatedField(required=False, allow_null=True, source="team")
+ link = serializers.ReadOnlyField(source="integration_url")
+ type = IntegrationTypeField(source="integration")
+ templates = serializers.DictField(required=False)
+ default_route = serializers.DictField(required=False)
+ heartbeat = serializers.SerializerMethodField()
+
+ PREFETCH_RELATED = ["channel_filters"]
+ SELECT_RELATED = ["organization", "integration_heartbeat"]
+
+ class Meta:
+ model = AlertReceiveChannel
+ fields = MaintainableObjectSerializerMixin.Meta.fields + [
+ "id",
+ "name",
+ "team_id",
+ "link",
+ "type",
+ "default_route",
+ "templates",
+ "heartbeat",
+ ]
+
+ def to_representation(self, instance):
+ result = super().to_representation(instance)
+ default_route = self._get_default_route_iterative(instance)
+ serializer = DefaultChannelFilterSerializer(default_route, context=self.context)
+ result["default_route"] = serializer.data
+ if is_demo_token_request(self.context["request"]):
+            # Replace the real integration token so demo requests cannot deliver alerts to this integration
+ link = result["link"]
+ real_token = instance.token
+ link = link.replace(real_token, DEMO_INTEGRATION_LINK_TOKEN)
+ result["link"] = link
+
+ return result
+
+ def create(self, validated_data):
+ validated_data = self._correct_validated_data(validated_data)
+ validated_data.pop("default_route", None)
+ organization = self.context["request"].auth.organization
+ integration = validated_data.get("integration")
+ if integration == AlertReceiveChannel.INTEGRATION_GRAFANA_ALERTING:
+ connection_error = GrafanaAlertingSyncManager.check_for_connection_errors(organization)
+ if connection_error:
+ raise serializers.ValidationError(connection_error)
+ instance = AlertReceiveChannel.create(
+ **validated_data,
+ author=self.context["request"].user,
+ organization=organization,
+ )
+ return instance
+
+ def validate(self, attrs):
+ organization = self.context["request"].auth.organization
+ verbal_name = attrs.get("verbal_name", None)
+ if verbal_name is None:
+ return attrs
+ try:
+ obj = AlertReceiveChannel.objects.get(organization=organization, verbal_name=verbal_name)
+ except AlertReceiveChannel.DoesNotExist:
+ return attrs
+ if self.instance and obj.id == self.instance.id:
+ return attrs
+ else:
+ raise BadRequest(detail="Integration with this name already exists")
+
+ def _correct_validated_data(self, validated_data):
+ templates = validated_data.pop("templates", {})
+ for template_name, templates_for_notification_channel in templates.items():
+ if type(templates_for_notification_channel) is dict:
+ for attr, template in templates_for_notification_channel.items():
+ try:
+ validated_data[AlertReceiveChannel.PUBLIC_TEMPLATES_FIELDS[template_name][attr]] = template
+ except KeyError:
+ raise BadRequest(detail="Invalid template data")
+ elif type(templates_for_notification_channel) is str:
+ try:
+ validated_data[
+ AlertReceiveChannel.PUBLIC_TEMPLATES_FIELDS[template_name]
+ ] = templates_for_notification_channel
+ except KeyError:
+ raise BadRequest(detail="Invalid template data")
+ elif templates_for_notification_channel is None:
+ try:
+ template_to_set_to_default = AlertReceiveChannel.PUBLIC_TEMPLATES_FIELDS[template_name]
+ if type(template_to_set_to_default) is str:
+ validated_data[AlertReceiveChannel.PUBLIC_TEMPLATES_FIELDS[template_name]] = None
+ elif type(template_to_set_to_default) is dict:
+ for key in template_to_set_to_default.keys():
+ validated_data[AlertReceiveChannel.PUBLIC_TEMPLATES_FIELDS[template_name][key]] = None
+ except KeyError:
+ raise BadRequest(detail="Invalid template data")
+
+ return validated_data
+
+ def validate_templates(self, templates):
+ if not isinstance(templates, dict):
+ raise BadRequest(detail="Invalid template data")
+
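+        # Compile each provided template with Jinja2 up front so syntax errors are rejected before anything is saved.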
+ for notification_channel in ["slack", "web", "sms", "phone_call", "email", "telegram"]:
+ template_data = templates.get(notification_channel, {})
+ if template_data is None:
+ continue
+ if not isinstance(template_data, dict):
+ raise BadRequest(detail=f"Invalid {notification_channel} template data")
+ for attr, attr_template in template_data.items():
+ if attr_template is None:
+ continue
+ try:
+ jinja_template_env.from_string(attr_template)
+ except TemplateSyntaxError:
+ raise BadRequest(detail=f"invalid {notification_channel} {attr} template")
+
+ for common_template in ["resolve_signal", "grouping_key"]:
+ template_data = templates.get(common_template, "")
+ if template_data is None:
+ continue
+ if not isinstance(template_data, str):
+ raise BadRequest(detail=f"Invalid {common_template} template data")
+ try:
+ jinja_template_env.from_string(template_data)
+ except TemplateSyntaxError:
+ raise BadRequest(detail=f"Invalid {common_template} template data")
+ return templates
+
+ def get_heartbeat(self, obj):
+ try:
+ heartbeat = obj.integration_heartbeat
+ except ObjectDoesNotExist:
+ return None
+ return IntegrationHeartBeatSerializer(heartbeat).data
+
+ @timed_lru_cache(timeout=5)
+ def _get_default_route_iterative(self, obj):
+ """
+ Gets default route iterative to not hit db on each integration instance.
+ """
+ for filter in obj.channel_filters.all():
+ if filter.is_default:
+ return filter
+
+
+class IntegrationUpdateSerializer(IntegrationSerializer):
+ type = IntegrationTypeField(source="integration", read_only=True)
+ team_id = TeamPrimaryKeyRelatedField(source="team", read_only=True)
+
+ def update(self, instance, validated_data):
+ validated_data = self._correct_validated_data(validated_data)
+ default_route_data = validated_data.pop("default_route", {})
+ default_route = instance.default_channel_filter
+ serializer = DefaultChannelFilterSerializer(default_route, default_route_data, context=self.context)
+ serializer.is_valid(raise_exception=True)
+ serializer.save()
+ return super().update(instance, validated_data)
diff --git a/engine/apps/public_api/serializers/integtration_heartbeat.py b/engine/apps/public_api/serializers/integtration_heartbeat.py
new file mode 100644
index 0000000000..8344891447
--- /dev/null
+++ b/engine/apps/public_api/serializers/integtration_heartbeat.py
@@ -0,0 +1,11 @@
+from rest_framework import serializers
+
+from apps.heartbeat.models import IntegrationHeartBeat
+
+
+class IntegrationHeartBeatSerializer(serializers.ModelSerializer):
+ class Meta:
+ model = IntegrationHeartBeat
+ fields = [
+ "link",
+ ]
diff --git a/engine/apps/public_api/serializers/maintenance.py b/engine/apps/public_api/serializers/maintenance.py
new file mode 100644
index 0000000000..dab10ec1fe
--- /dev/null
+++ b/engine/apps/public_api/serializers/maintenance.py
@@ -0,0 +1,36 @@
+import datetime
+
+from rest_framework import serializers
+
+from apps.alerts.models import MaintainableObject
+
+
+class MaintainableObjectSerializerMixin(serializers.Serializer):
+
+ maintenance_mode = serializers.SerializerMethodField()
+
+ # For some reason maintenance_started_at's format is flaky. Forcing the one listed in docs.
+ maintenance_started_at = serializers.DateTimeField(read_only=True, format="%Y-%m-%dT%H:%M:%SZ")
+ maintenance_end_at = serializers.SerializerMethodField()
+
+ class Meta:
+ """
+        Subclasses' Meta should extend these fields and read_only_fields rather than overriding them outright.
+ """
+
+ fields = [
+ "maintenance_mode",
+ "maintenance_started_at",
+ "maintenance_end_at",
+ ]
+
+ def get_maintenance_mode(self, obj: MaintainableObject) -> str:
+ if obj.get_maintenance_mode_display() is None:
+ return None
+ return str(obj.get_maintenance_mode_display()).lower()
+
+ def get_maintenance_end_at(self, obj: MaintainableObject) -> str:
+ if obj.till_maintenance_timestamp is not None:
+ return serializers.DateTimeField().to_representation(
+ datetime.datetime.fromtimestamp(obj.till_maintenance_timestamp)
+ )
diff --git a/engine/apps/public_api/serializers/on_call_shifts.py b/engine/apps/public_api/serializers/on_call_shifts.py
new file mode 100644
index 0000000000..29a148eb29
--- /dev/null
+++ b/engine/apps/public_api/serializers/on_call_shifts.py
@@ -0,0 +1,325 @@
+import time
+
+from rest_framework import fields, serializers
+
+from apps.schedules.models import CustomOnCallShift
+from apps.schedules.tasks import (
+ drop_cached_ical_task,
+ schedule_notify_about_empty_shifts_in_schedule,
+ schedule_notify_about_gaps_in_schedule,
+)
+from apps.user_management.models import User
+from common.api_helpers.custom_fields import TeamPrimaryKeyRelatedField, UsersFilteredByOrganizationField
+from common.api_helpers.exceptions import BadRequest
+from common.api_helpers.mixins import EagerLoadingMixin
+from common.api_helpers.utils import CurrentOrganizationDefault
+
+
+class RollingUsersField(serializers.ListField):
+ def to_representation(self, value):
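+        # rolling_users is stored as a list of {user_pk: public_primary_key} dicts; expose only the public keys, one list per rotation group.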
+ result = [list(d.values()) for d in value]
+ return result
+
+
+class CustomOnCallShiftTypeField(fields.CharField):
+ def to_representation(self, value):
+ return CustomOnCallShift.PUBLIC_TYPE_CHOICES_MAP[value]
+
+ def to_internal_value(self, data):
+ try:
+ shift_type = [
+ key
+ for key, value in CustomOnCallShift.PUBLIC_TYPE_CHOICES_MAP.items()
+                if value == data
+ ][0]
+ except IndexError:
+ raise BadRequest(detail="Invalid shift type")
+ return shift_type
+
+
+class CustomOnCallShiftWeekStartField(fields.CharField):
+ def to_representation(self, value):
+ return CustomOnCallShift.ICAL_WEEKDAY_MAP[value]
+
+ def to_internal_value(self, data):
+ try:
+ week_start = [
+ key
+ for key, value in CustomOnCallShift.ICAL_WEEKDAY_MAP.items()
+                if value == data
+ ][0]
+ except IndexError:
+ raise BadRequest(
+ detail="Invalid day format for week start field. "
+ "Should be one of the following: 'MO', 'TU', 'WE', 'TH', 'FR', 'SA', 'SU'"
+ )
+ return week_start
+
+
+class CustomOnCallShiftFrequencyField(fields.CharField):
+ def to_representation(self, value):
+ return CustomOnCallShift.PUBLIC_FREQUENCY_CHOICES_MAP[value]
+
+ def to_internal_value(self, data):
+ try:
+ frequency = [
+ key
+ for key, value in CustomOnCallShift.PUBLIC_FREQUENCY_CHOICES_MAP.items()
+                if value == data
+ ][0]
+ except IndexError:
+ raise BadRequest(detail="Invalid frequency type")
+ return frequency
+
+
+class CustomOnCallShiftSerializer(EagerLoadingMixin, serializers.ModelSerializer):
+ id = serializers.CharField(read_only=True, source="public_primary_key")
+ organization = serializers.HiddenField(default=CurrentOrganizationDefault())
+ team_id = TeamPrimaryKeyRelatedField(required=False, allow_null=True, source="team")
+ type = CustomOnCallShiftTypeField()
+ time_zone = serializers.CharField(required=False, allow_null=True)
+ users = UsersFilteredByOrganizationField(queryset=User.objects, required=False)
+ frequency = CustomOnCallShiftFrequencyField(required=False, allow_null=True)
+ week_start = CustomOnCallShiftWeekStartField(required=False)
+ level = serializers.IntegerField(required=False, source="priority_level")
+ by_day = serializers.ListField(required=False, allow_null=True)
+ by_month = serializers.ListField(required=False, allow_null=True)
+ by_monthday = serializers.ListField(required=False, allow_null=True)
+ rolling_users = RollingUsersField(
+ allow_null=True,
+ required=False,
+ child=UsersFilteredByOrganizationField(queryset=User.objects, required=False, allow_null=True),
+ )
+
+ class Meta:
+ model = CustomOnCallShift
+ fields = [
+ "id",
+ "organization",
+ "team_id",
+ "name",
+ "type",
+ "time_zone",
+ "level",
+ "start",
+ "duration",
+ "frequency",
+ "interval",
+ "week_start",
+ "by_day",
+ "by_month",
+ "by_monthday",
+ "source",
+ "users",
+ "rolling_users",
+ "start_rotation_from_user_index",
+ ]
+ extra_kwargs = {
+ "interval": {"required": False, "allow_null": True},
+ "source": {"required": False, "write_only": True},
+ }
+
+ PREFETCH_RELATED = ["users"]
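+
+    # A hypothetical create payload for illustration (values assume the public
+    # labels referenced in the validators below; IDs are made up):
+    #   {
+    #       "name": "Night shift",
+    #       "type": "rolling_users",
+    #       "start": "2022-06-06T20:00:00",
+    #       "duration": 28800,
+    #       "frequency": "weekly",
+    #       "week_start": "MO",
+    #       "rolling_users": [["U1"], ["U2"]],
+    #       "start_rotation_from_user_index": 0,
+    #   }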
+
+ def create(self, validated_data):
+ self._validate_frequency_and_week_start(
+ validated_data["type"], validated_data.get("frequency"), validated_data.get("week_start")
+ )
+ validated_data = self._correct_validated_data(validated_data["type"], validated_data)
+ self._validate_start_rotation_from_user_index(
+ validated_data["type"],
+ validated_data.get("start_rotation_from_user_index"),
+ )
+ self._validate_frequency_daily(
+ validated_data["type"],
+ validated_data.get("frequency"),
+ validated_data.get("by_day"),
+ validated_data.get("by_monthday"),
+ )
+ instance = super().create(validated_data)
+ return instance
+
+ def validate_name(self, name):
+ organization = self.context["request"].auth.organization
+ if name is None:
+ return name
+ try:
+ obj = CustomOnCallShift.objects.get(organization=organization, name=name)
+ except CustomOnCallShift.DoesNotExist:
+ return name
+ if self.instance and obj.id == self.instance.id:
+ return name
+ else:
+ raise BadRequest(detail="On-call shift with this name already exists")
+
+ def validate_by_day(self, by_day):
+ if by_day:
+ for day in by_day:
+ if day not in CustomOnCallShift.ICAL_WEEKDAY_MAP.values():
+ raise BadRequest(
+ detail="Invalid day value in by_day field. "
+ "Valid values: 'MO', 'TU', 'WE', 'TH', 'FR', 'SA', 'SU'"
+ )
+ return by_day
+
+ def validate_by_month(self, by_month):
+ if by_month:
+ for month in by_month:
+ if not isinstance(month, int) or not 1 <= month <= 12:
+ raise BadRequest(detail="Invalid month value in by_month field. Valid values: from 1 to 12")
+ return by_month
+
+ def validate_by_monthday(self, by_monthday):
+ if by_monthday:
+ for day in by_monthday:
+ if not isinstance(day, int) or not -31 <= day <= 31 or day == 0:
+ raise BadRequest(
+ detail="Invalid monthday value in by_monthday field. "
+ "Valid values: from 1 to 31 and from -31 to -1"
+ )
+ return by_monthday
+
+ def validate_interval(self, interval):
+ if interval is not None:
+ if not isinstance(interval, int) or interval <= 0:
+ raise BadRequest(detail="Invalid value for interval")
+ return interval
+
+ def validate_rolling_users(self, rolling_users):
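+        """Store each rotation step as a {pk: public_primary_key} mapping of its users."""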
+ result = []
+ for users in rolling_users:
+ users_dict = dict()
+ for user in users:
+ users_dict[user.pk] = user.public_primary_key
+ result.append(users_dict)
+ return result
+
+ def _validate_frequency_and_week_start(self, event_type, frequency, week_start):
+ if event_type != CustomOnCallShift.TYPE_SINGLE_EVENT:
+ if frequency is None:
+ raise BadRequest(detail="Field 'frequency' is required for this on-call shift type")
+ elif frequency == CustomOnCallShift.FREQUENCY_WEEKLY and week_start is None:
+ raise BadRequest(detail="Field 'week_start' is required for frequency type 'weekly'")
+
+ def _validate_frequency_daily(self, event_type, frequency, by_day, by_monthday):
+ if event_type == CustomOnCallShift.TYPE_ROLLING_USERS_EVENT:
+ if frequency == CustomOnCallShift.FREQUENCY_DAILY:
+ if by_day or by_monthday:
+ raise BadRequest(
+ detail="Day limits are temporarily disabled for on-call shifts with type 'rolling_users' "
+ "and frequency 'daily'"
+ )
+
+    def _validate_start_rotation_from_user_index(self, event_type, index):
+        if event_type == CustomOnCallShift.TYPE_ROLLING_USERS_EVENT and index is None:
+ raise BadRequest(detail="Field 'start_rotation_from_user_index' is required for this on-call shift type")
+
+ def _validate_start(self, start):
+ try:
+ time.strptime(start, "%Y-%m-%dT%H:%M:%S")
+ except (TypeError, ValueError):
+ raise BadRequest(detail="Invalid datetime format, should be \"yyyy-mm-dd'T'hh:mm:ss\"")
+
+ def to_internal_value(self, data):
+ if data.get("users", []) is None: # terraform case
+ data["users"] = []
+ if data.get("rolling_users", []) is None: # terraform case
+ data["rolling_users"] = []
+ if data.get("source") != CustomOnCallShift.SOURCE_TERRAFORM:
+ data["source"] = CustomOnCallShift.SOURCE_API
+ if data.get("start") is not None:
+ self._validate_start(data["start"])
+ result = super().to_internal_value(data)
+ return result
+
+ def to_representation(self, instance):
+ result = super().to_representation(instance)
+ result["duration"] = int(instance.duration.total_seconds())
+ result["start"] = instance.start.strftime("%Y-%m-%dT%H:%M:%S")
+ result = self._get_fields_to_represent(instance, result)
+ return result
+
+ def _get_fields_to_represent(self, instance, result):
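+        """Drop fields that are irrelevant for the shift type from the representation
+        (e.g. a single event exposes no recurrence or rotation fields)."""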
+ event_type = instance.type
+ fields_to_remove_map = {
+ CustomOnCallShift.TYPE_SINGLE_EVENT: [
+ "frequency",
+ "interval",
+ "by_day",
+ "by_month",
+ "by_monthday",
+ "week_start",
+ "rolling_users",
+ "start_rotation_from_user_index",
+ ],
+ CustomOnCallShift.TYPE_RECURRENT_EVENT: ["rolling_users", "start_rotation_from_user_index"],
+ CustomOnCallShift.TYPE_ROLLING_USERS_EVENT: ["users"],
+ }
+ for field in fields_to_remove_map[event_type]:
+ result.pop(field, None)
+
+ # represent field week_start only for events with frequency "weekly"
+ if instance.frequency != CustomOnCallShift.FREQUENCY_WEEKLY:
+ result.pop("week_start", None)
+
+ return result
+
+ def _correct_validated_data(self, event_type, validated_data):
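+        """Null out fields that do not apply to the given shift type before saving
+        (users are reset to an empty list), using per-type field sets similar to
+        _get_fields_to_represent."""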
+ fields_to_update_map = {
+ CustomOnCallShift.TYPE_SINGLE_EVENT: [
+ "frequency",
+ "interval",
+ "by_day",
+ "by_month",
+ "by_monthday",
+ "rolling_users",
+ "start_rotation_from_user_index",
+ ],
+ CustomOnCallShift.TYPE_RECURRENT_EVENT: ["rolling_users", "start_rotation_from_user_index"],
+ CustomOnCallShift.TYPE_ROLLING_USERS_EVENT: ["users"],
+ }
+ for field in fields_to_update_map[event_type]:
+ validated_data[field] = None if field != "users" else []
+
+ validated_data_list_fields = ["by_day", "by_month", "by_monthday", "rolling_users"]
+
+ for field in validated_data_list_fields:
+ if isinstance(validated_data.get(field), list) and len(validated_data[field]) == 0:
+ validated_data[field] = None
+ if validated_data.get("start") is not None:
+ validated_data["start"] = validated_data["start"].replace(tzinfo=None)
+ return validated_data
+
+
+class CustomOnCallShiftUpdateSerializer(CustomOnCallShiftSerializer):
+ type = CustomOnCallShiftTypeField(required=False)
+ duration = serializers.DurationField(required=False)
+ name = serializers.CharField(required=False)
+ start = serializers.DateTimeField(required=False)
+ team_id = TeamPrimaryKeyRelatedField(read_only=True, source="team")
+
+ def update(self, instance, validated_data):
+ event_type = validated_data.get("type", instance.type)
+ frequency = validated_data.get("frequency", instance.frequency)
+ start_rotation_from_user_index = validated_data.get(
+ "start_rotation_from_user_index", instance.start_rotation_from_user_index
+ )
+ week_start = validated_data.get("week_start")
+ if frequency != instance.frequency:
+ self._validate_frequency_and_week_start(event_type, frequency, week_start)
+
+ by_day = validated_data.get("by_day", instance.by_day)
+ by_monthday = validated_data.get("by_monthday", instance.by_monthday)
+ self._validate_frequency_daily(event_type, frequency, by_day, by_monthday)
+
+ if start_rotation_from_user_index != instance.start_rotation_from_user_index:
+ self._validate_start_rotation_from_user_index(event_type, start_rotation_from_user_index)
+ validated_data = self._correct_validated_data(event_type, validated_data)
+ result = super().update(instance, validated_data)
+        for schedule in instance.schedules.all():
+            drop_cached_ical_task.apply_async(
+                (schedule.pk,),
+            )
+            schedule_notify_about_empty_shifts_in_schedule.apply_async((schedule.pk,))
+            schedule_notify_about_gaps_in_schedule.apply_async((schedule.pk,))
+ return result
diff --git a/engine/apps/public_api/serializers/organizations.py b/engine/apps/public_api/serializers/organizations.py
new file mode 100644
index 0000000000..4df06f13a2
--- /dev/null
+++ b/engine/apps/public_api/serializers/organizations.py
@@ -0,0 +1,18 @@
+from rest_framework import serializers
+
+from apps.user_management.models import Organization
+
+from .maintenance import MaintainableObjectSerializerMixin
+
+
+class OrganizationSerializer(serializers.ModelSerializer, MaintainableObjectSerializerMixin):
+    id = serializers.ReadOnlyField(source="public_primary_key")
+
+ class Meta:
+ model = Organization
+ fields = MaintainableObjectSerializerMixin.Meta.fields + [
+ "id",
+ ]
+ read_only_fields = MaintainableObjectSerializerMixin.Meta.fields + [
+ "id",
+ ]
diff --git a/engine/apps/public_api/serializers/personal_notification_rules.py b/engine/apps/public_api/serializers/personal_notification_rules.py
new file mode 100644
index 0000000000..f0df31da0a
--- /dev/null
+++ b/engine/apps/public_api/serializers/personal_notification_rules.py
@@ -0,0 +1,155 @@
+import time
+from datetime import timedelta
+
+from rest_framework import exceptions, serializers
+
+from apps.base.models import UserNotificationPolicy
+from apps.base.models.user_notification_policy import NotificationChannelPublicAPIOptions
+from common.api_helpers.custom_fields import UserIdField
+from common.api_helpers.exceptions import BadRequest
+from common.api_helpers.mixins import EagerLoadingMixin
+
+
+class PersonalNotificationRuleSerializer(EagerLoadingMixin, serializers.ModelSerializer):
+ id = serializers.CharField(read_only=True, source="public_primary_key")
+ user_id = UserIdField(required=True, source="user")
+ position = serializers.IntegerField(required=False, source="order")
+ type = serializers.CharField(
+ required=False,
+ )
+
+ duration = serializers.ChoiceField(
+ required=False, source="wait_delay", choices=UserNotificationPolicy.DURATION_CHOICES
+ )
+ manual_order = serializers.BooleanField(default=False, write_only=True)
+
+ SELECT_RELATED = ["user"]
+
+    # The public API combines the model fields "step" and "notify_by" into a single "type" field.
+    # Step.NOTIFY is resolved through the NotificationChannelPublicAPIOptions class; Step.WAIT is handled separately.
+ TYPE_WAIT = "wait"
+
+ class Meta:
+ model = UserNotificationPolicy
+ fields = ["id", "user_id", "position", "type", "duration", "manual_order", "important"]
+
+ def create(self, validated_data):
+ if "type" not in validated_data:
+ raise exceptions.ValidationError({"type": "Type is required"})
+
+ validated_data = self.correct_validated_data(validated_data)
+        # "type" is the serializer alias for the combined step + notify_by fields;
+        # correct_validated_data parses it into step + notify_by, which is why
+        # the "step" key is used below instead of "type".
+ if "wait_delay" in validated_data and validated_data["step"] != UserNotificationPolicy.Step.WAIT:
+ raise exceptions.ValidationError({"duration": "Duration can't be set"})
+ user = validated_data.pop("user")
+ manual_order = validated_data.pop("manual_order")
+ if not manual_order:
+ order = validated_data.pop("order", None)
+ instance = UserNotificationPolicy.objects.create(**validated_data, user=user)
+ self._change_position(order, instance)
+ else:
+ instance = UserNotificationPolicy.objects.create(**validated_data, user=user)
+
+ return instance
+
+ def to_internal_value(self, data):
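+        """Accept duration either as an 'H:MM:SS' string or as integer seconds.
+
+        A hypothetical payload value of 300 is normalized to "0:05:00" before the
+        ChoiceField validates it against DURATION_CHOICES.
+        """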
+ if "duration" in data:
+ try:
+ time.strptime(data["duration"], "%H:%M:%S")
+ except (ValueError, TypeError):
+ try:
+ data["duration"] = str(timedelta(seconds=data["duration"]))
+ except (ValueError, TypeError):
+ raise BadRequest(detail="Invalid duration format")
+ return super().to_internal_value(data)
+
+    def to_representation(self, instance):
+        step = instance.step
+ result = super().to_representation(instance)
+
+ if instance.step == UserNotificationPolicy.Step.WAIT:
+ result["type"] = self.TYPE_WAIT
+ else:
+ result["type"] = NotificationChannelPublicAPIOptions.LABELS[instance.notify_by]
+
+ result = self.clear_fields(step, result)
+
+ if "duration" in result and result["duration"] is not None:
+ result["duration"] = result["duration"].seconds
+ return result
+
+ # remove duration from response if step is not wait
+ def clear_fields(self, step, result):
+ possible_fields = ["duration"]
+ if step == UserNotificationPolicy.Step.WAIT:
+ possible_fields.remove("duration")
+ for field in possible_fields:
+ result.pop(field, None)
+ return result
+
+ def correct_validated_data(self, validated_data):
+ rule_type = validated_data.get("type")
+ step, notification_channel = self._type_to_step_and_notification_channel(rule_type)
+
+ validated_data["step"] = step
+
+ if step == UserNotificationPolicy.Step.NOTIFY:
+ validated_data["notify_by"] = notification_channel
+
+ if step == UserNotificationPolicy.Step.WAIT and "wait_delay" not in validated_data:
+ validated_data["wait_delay"] = UserNotificationPolicy.FIVE_MINUTES
+
+ validated_data.pop("type")
+ return validated_data
+
+ @classmethod
+ def _type_to_step_and_notification_channel(cls, rule_type):
+ if rule_type == cls.TYPE_WAIT:
+ return UserNotificationPolicy.Step.WAIT, None
+
+ for notification_channel in NotificationChannelPublicAPIOptions.AVAILABLE_FOR_USE:
+ label = NotificationChannelPublicAPIOptions.LABELS[notification_channel]
+
+ if rule_type == label:
+ return UserNotificationPolicy.Step.NOTIFY, notification_channel
+
+ raise exceptions.ValidationError({"type": "Invalid type"})
+
+ def _change_position(self, order, instance):
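+        """Move the rule to the given position: a non-negative order is an index
+        from the top, -1 means the bottom, anything else is rejected."""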
+ if order is not None:
+ if order >= 0:
+ instance.to(order)
+ elif order == -1:
+ instance.bottom()
+ else:
+ raise BadRequest(detail="Invalid value for position field")
+
+
+class PersonalNotificationRuleUpdateSerializer(PersonalNotificationRuleSerializer):
+ user_id = UserIdField(read_only=True, source="user")
+ important = serializers.BooleanField(read_only=True)
+
+ def update(self, instance, validated_data):
+ if validated_data.get("type", None):
+ validated_data = self.correct_validated_data(validated_data)
+            # "type" is the serializer alias for the combined step + notify_by fields;
+            # correct_validated_data parses it into step + notify_by, which is why
+            # the "step" key is used below instead of "type".
+ if "wait_delay" in validated_data and validated_data["step"] != UserNotificationPolicy.Step.WAIT:
+ raise exceptions.ValidationError({"duration": "Duration can't be set"})
+ if validated_data["step"] != UserNotificationPolicy.Step.WAIT:
+ validated_data["wait_delay"] = None
+ else:
+ if "wait_delay" in validated_data and instance.step != UserNotificationPolicy.Step.WAIT:
+ raise exceptions.ValidationError({"duration": "Duration can't be set"})
+
+        manual_order = validated_data.pop("manual_order", False)
+
+ if not manual_order:
+ order = validated_data.pop("order", None)
+ self._change_position(order, instance)
+
+ return super().update(instance, validated_data)
diff --git a/engine/apps/public_api/serializers/resolution_notes.py b/engine/apps/public_api/serializers/resolution_notes.py
new file mode 100644
index 0000000000..3c5bf20900
--- /dev/null
+++ b/engine/apps/public_api/serializers/resolution_notes.py
@@ -0,0 +1,53 @@
+from rest_framework import serializers
+
+from apps.alerts.models import AlertGroup, ResolutionNote
+from common.api_helpers.custom_fields import OrganizationFilteredPrimaryKeyRelatedField, UserIdField
+from common.api_helpers.exceptions import BadRequest
+from common.api_helpers.mixins import EagerLoadingMixin
+
+
+class ResolutionNoteSerializer(EagerLoadingMixin, serializers.ModelSerializer):
+ id = serializers.CharField(read_only=True, source="public_primary_key")
+ alert_group_id = OrganizationFilteredPrimaryKeyRelatedField(
+ queryset=AlertGroup.unarchived_objects,
+ source="alert_group",
+ filter_field="channel__organization",
+ )
+ text = serializers.CharField(allow_null=False, source="message_text")
+ source = serializers.CharField(read_only=True, source="get_source_display")
+ author = UserIdField(read_only=True)
+
+ class Meta:
+ model = ResolutionNote
+ fields = [
+ "id",
+ "alert_group_id",
+ "author",
+ "source",
+ "created_at",
+ "text",
+ ]
+ read_only_fields = [
+ "created_at",
+ ]
+
+ SELECT_RELATED = ["alert_group", "resolution_note_slack_message", "author"]
+
+ def create(self, validated_data):
+ validated_data["author"] = self.context["request"].user
+ validated_data["source"] = ResolutionNote.Source.WEB
+ return super().create(validated_data)
+
+ def to_representation(self, instance):
+ result = super().to_representation(instance)
+ result["text"] = instance.text
+ return result
+
+
+class ResolutionNoteUpdateSerializer(ResolutionNoteSerializer):
+ alert_group_id = serializers.CharField(read_only=True, source="alert_group.public_primary_key")
+
+ def update(self, instance, validated_data):
+ if instance.source != ResolutionNote.Source.WEB:
+ raise BadRequest(detail="Cannot update message with this source type")
+ return super().update(instance, validated_data)
diff --git a/engine/apps/public_api/serializers/routes.py b/engine/apps/public_api/serializers/routes.py
new file mode 100644
index 0000000000..6bd0e4c987
--- /dev/null
+++ b/engine/apps/public_api/serializers/routes.py
@@ -0,0 +1,187 @@
+from django.apps import apps
+from rest_framework import serializers
+
+from apps.alerts.models import AlertReceiveChannel, ChannelFilter, EscalationChain
+from common.api_helpers.custom_fields import OrganizationFilteredPrimaryKeyRelatedField
+from common.api_helpers.exceptions import BadRequest
+from common.api_helpers.mixins import OrderedModelSerializerMixin
+
+
+class ChannelFilterSerializer(OrderedModelSerializerMixin, serializers.ModelSerializer):
+ id = serializers.CharField(read_only=True, source="public_primary_key")
+ slack = serializers.DictField(required=False)
+ routing_regex = serializers.CharField(allow_null=False, required=True, source="filtering_term")
+ position = serializers.IntegerField(required=False, source="order")
+ integration_id = OrganizationFilteredPrimaryKeyRelatedField(
+ queryset=AlertReceiveChannel.objects, source="alert_receive_channel"
+ )
+ escalation_chain_id = OrganizationFilteredPrimaryKeyRelatedField(
+ queryset=EscalationChain.objects,
+ source="escalation_chain",
+ )
+
+ is_the_last_route = serializers.BooleanField(read_only=True, source="is_default")
+ manual_order = serializers.BooleanField(default=False, write_only=True)
+
+ class Meta:
+ model = ChannelFilter
+ fields = [
+ "id",
+ "integration_id",
+ "escalation_chain_id",
+ "routing_regex",
+ "position",
+ "is_the_last_route",
+ "slack",
+ "manual_order",
+ ]
+ read_only_fields = ("is_the_last_route",)
+
+ def to_representation(self, instance):
+ result = super().to_representation(instance)
+ result["slack"] = {"channel_id": instance.slack_channel_id}
+ return result
+
+ def create(self, validated_data):
+ validated_data = self._correct_validated_data(validated_data)
+ manual_order = validated_data.pop("manual_order")
+ if not manual_order:
+ order = validated_data.pop("order", None)
+ alert_receive_channel_id = validated_data.get("alert_receive_channel")
+ # validate 'order' value before creation
+ self._validate_order(order, {"alert_receive_channel_id": alert_receive_channel_id, "is_default": False})
+ instance = super().create(validated_data)
+ self._change_position(order, instance)
+ else:
+ instance = super().create(validated_data)
+
+ return instance
+
+ def validate(self, attrs):
+ alert_receive_channel = attrs.get("alert_receive_channel") or self.instance.alert_receive_channel
+ filtering_term = attrs.get("filtering_term")
+ if filtering_term is None:
+ return attrs
+ try:
+ obj = ChannelFilter.objects.get(alert_receive_channel=alert_receive_channel, filtering_term=filtering_term)
+ except ChannelFilter.DoesNotExist:
+ return attrs
+ if self.instance and obj.id == self.instance.id:
+ return attrs
+ else:
+ raise BadRequest(detail="Route with this regex already exists")
+
+ def validate_escalation_chain_id(self, escalation_chain):
+ if self.instance is not None:
+ alert_receive_channel = self.instance.alert_receive_channel
+ else:
+ alert_receive_channel = AlertReceiveChannel.objects.get(
+ public_primary_key=self.initial_data["integration_id"]
+ )
+
+ if escalation_chain.team != alert_receive_channel.team:
+ raise BadRequest(detail="Escalation chain must be assigned to the same team as the integration")
+
+ return escalation_chain
+
+ def _correct_validated_data(self, validated_data):
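+        """Pull the optional slack channel out of the nested "slack" dict: a
+        hypothetical {"slack": {"channel_id": "ch123"}} is validated (and
+        upper-cased) into validated_data["slack_channel_id"]."""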
+ slack_field = validated_data.pop("slack", {})
+ if "channel_id" in slack_field:
+ validated_data["slack_channel_id"] = self._validate_slack_channel_id(slack_field.get("channel_id"))
+ return validated_data
+
+ def _validate_slack_channel_id(self, slack_channel_id):
+ SlackChannel = apps.get_model("slack", "SlackChannel")
+
+ if slack_channel_id is not None:
+ slack_channel_id = slack_channel_id.upper()
+ organization = self.context["request"].auth.organization
+ slack_team_identity = organization.slack_team_identity
+ try:
+ slack_team_identity.get_cached_channels().get(slack_id=slack_channel_id)
+ except SlackChannel.DoesNotExist:
+ raise BadRequest(detail="Slack channel does not exist")
+ return slack_channel_id
+
+
+class ChannelFilterUpdateSerializer(ChannelFilterSerializer):
+ integration_id = OrganizationFilteredPrimaryKeyRelatedField(source="alert_receive_channel", read_only=True)
+ routing_regex = serializers.CharField(allow_null=False, required=False, source="filtering_term")
+
+ class Meta(ChannelFilterSerializer.Meta):
+ read_only_fields = [*ChannelFilterSerializer.Meta.read_only_fields, "integration_id"]
+
+ def update(self, instance, validated_data):
+ validated_data = self._correct_validated_data(validated_data)
+
+        manual_order = validated_data.pop("manual_order", False)
+ if not manual_order:
+ order = validated_data.pop("order", None)
+ self._validate_order(
+ order, {"alert_receive_channel_id": instance.alert_receive_channel_id, "is_default": False}
+ )
+ self._change_position(order, instance)
+
+ return super().update(instance, validated_data)
+
+
+class DefaultChannelFilterSerializer(OrderedModelSerializerMixin, serializers.ModelSerializer):
+ id = serializers.CharField(read_only=True, source="public_primary_key")
+ slack = serializers.DictField(required=False)
+ escalation_chain_id = OrganizationFilteredPrimaryKeyRelatedField(
+ queryset=EscalationChain.objects,
+ source="escalation_chain",
+ allow_null=True,
+ required=False,
+ )
+
+ class Meta:
+ model = ChannelFilter
+ fields = [
+ "id",
+ "slack",
+ "escalation_chain_id",
+ ]
+
+ def _validate_slack_channel_id(self, slack_channel_id):
+ SlackChannel = apps.get_model("slack", "SlackChannel")
+
+ if slack_channel_id is not None:
+ slack_channel_id = slack_channel_id.upper()
+ organization = self.context["request"].auth.organization
+ slack_team_identity = organization.slack_team_identity
+ try:
+ slack_team_identity.get_cached_channels().get(slack_id=slack_channel_id)
+ except SlackChannel.DoesNotExist:
+ raise BadRequest(detail="Slack channel does not exist")
+ return slack_channel_id
+
+ def _correct_validated_data(self, validated_data):
+ slack_field = validated_data.pop("slack", {})
+ if "channel_id" in slack_field:
+ validated_data["slack_channel_id"] = self._validate_slack_channel_id(slack_field.get("channel_id"))
+ return validated_data
+
+ def to_representation(self, instance):
+ result = super().to_representation(instance)
+ result["slack"] = {"channel_id": instance.slack_channel_id}
+ return result
+
+ def update(self, instance, validated_data):
+ validated_data = self._correct_validated_data(validated_data)
+ return super().update(instance, validated_data)
+
+ def validate_escalation_chain_id(self, escalation_chain):
+ if escalation_chain is None:
+ return escalation_chain
+ if self.instance is not None:
+ alert_receive_channel = self.instance.alert_receive_channel
+ else:
+ alert_receive_channel = AlertReceiveChannel.objects.get(
+ public_primary_key=self.initial_data["integration_id"]
+ )
+
+ if escalation_chain.team != alert_receive_channel.team:
+ raise BadRequest(detail="Escalation chain must be assigned to the same team as the integration")
+
+ return escalation_chain
diff --git a/engine/apps/public_api/serializers/schedules_base.py b/engine/apps/public_api/serializers/schedules_base.py
new file mode 100644
index 0000000000..80cd8bc5ad
--- /dev/null
+++ b/engine/apps/public_api/serializers/schedules_base.py
@@ -0,0 +1,92 @@
+from django.apps import apps
+from django.utils import timezone
+from rest_framework import serializers
+
+from apps.public_api import constants as public_api_constants
+from apps.public_api.helpers import is_demo_token_request
+from apps.schedules.ical_utils import list_users_to_notify_from_ical
+from apps.schedules.models import OnCallSchedule
+from apps.slack.models import SlackUserGroup
+from common.api_helpers.custom_fields import TeamPrimaryKeyRelatedField
+from common.api_helpers.exceptions import BadRequest
+
+
+class ScheduleBaseSerializer(serializers.ModelSerializer):
+ id = serializers.CharField(read_only=True, source="public_primary_key")
+ on_call_now = serializers.SerializerMethodField()
+ slack = serializers.DictField(required=False)
+ team_id = TeamPrimaryKeyRelatedField(required=False, allow_null=True, source="team")
+
+ def create(self, validated_data):
+ validated_data = self._correct_validated_data(validated_data)
+ validated_data["organization"] = self.context["request"].auth.organization
+ return super().create(validated_data)
+
+ def validate_name(self, name):
+ organization = self.context["request"].auth.organization
+ if name is None:
+ return name
+ try:
+ obj = OnCallSchedule.objects.get(organization=organization, name=name)
+ except OnCallSchedule.DoesNotExist:
+ return name
+ if self.instance and obj.id == self.instance.id:
+ return name
+ else:
+ raise BadRequest(detail="Schedule with this name already exists")
+
+ def get_on_call_now(self, obj):
+ if not is_demo_token_request(self.context["request"]):
+ users_on_call = list_users_to_notify_from_ical(obj, timezone.datetime.now(timezone.utc))
+ if users_on_call is not None:
+ return [user.public_primary_key for user in users_on_call]
+ else:
+ return []
+ else:
+ return [public_api_constants.DEMO_USER_ID]
+
+ def _correct_validated_data(self, validated_data):
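+        """Unpack the nested "slack" dict into model fields: "channel_id" is stored
+        on channel, and "user_group_id" is resolved to a SlackUserGroup by slack_id."""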
+ slack_field = validated_data.pop("slack", {})
+ if "channel_id" in slack_field:
+ validated_data["channel"] = slack_field["channel_id"]
+
+ if "user_group_id" in slack_field:
+ validated_data["user_group"] = SlackUserGroup.objects.filter(slack_id=slack_field["user_group_id"]).first()
+
+ return validated_data
+
+ def validate_slack(self, slack_field):
+ SlackChannel = apps.get_model("slack", "SlackChannel")
+
+ slack_channel_id = slack_field.get("channel_id")
+ user_group_id = slack_field.get("user_group_id")
+
+ organization = self.context["request"].auth.organization
+ slack_team_identity = organization.slack_team_identity
+
+ if slack_channel_id is not None:
+ slack_channel_id = slack_channel_id.upper()
+ try:
+ slack_team_identity.get_cached_channels().get(slack_id=slack_channel_id)
+ except SlackChannel.DoesNotExist:
+ raise BadRequest(detail="Slack channel does not exist")
+
+ if user_group_id is not None:
+ user_group_id = user_group_id.upper()
+ try:
+ slack_team_identity.usergroups.get(slack_id=user_group_id)
+ except SlackUserGroup.DoesNotExist:
+ raise BadRequest(detail="Slack user group does not exist")
+
+ return slack_field
+
+ def to_representation(self, instance):
+ result = super().to_representation(instance)
+
+ user_group_id = instance.user_group.slack_id if instance.user_group is not None else None
+ result["slack"] = {
+ "channel_id": instance.channel or None,
+ "user_group_id": user_group_id,
+ }
+
+ return result
diff --git a/engine/apps/public_api/serializers/schedules_calendar.py b/engine/apps/public_api/serializers/schedules_calendar.py
new file mode 100644
index 0000000000..55c2d2a229
--- /dev/null
+++ b/engine/apps/public_api/serializers/schedules_calendar.py
@@ -0,0 +1,106 @@
+import pytz
+from django.utils import timezone
+from rest_framework import serializers
+
+from apps.public_api.serializers.schedules_base import ScheduleBaseSerializer
+from apps.schedules.models import CustomOnCallShift, OnCallScheduleCalendar
+from apps.schedules.tasks import (
+ drop_cached_ical_task,
+ schedule_notify_about_empty_shifts_in_schedule,
+ schedule_notify_about_gaps_in_schedule,
+)
+from common.api_helpers.custom_fields import TeamPrimaryKeyRelatedField, UsersFilteredByOrganizationField
+from common.api_helpers.exceptions import BadRequest
+
+
+class ScheduleCalendarSerializer(ScheduleBaseSerializer):
+ time_zone = serializers.CharField(required=True)
+ shifts = UsersFilteredByOrganizationField(
+ queryset=CustomOnCallShift.objects,
+ required=False,
+ source="custom_on_call_shifts",
+ )
+
+ class Meta:
+ model = OnCallScheduleCalendar
+ fields = [
+ "id",
+ "team_id",
+ "name",
+ "time_zone",
+ "slack",
+ "on_call_now",
+ "shifts",
+ "ical_url_overrides",
+ ]
+ extra_kwargs = {
+ "ical_url_overrides": {"required": False, "allow_null": True},
+ }
+
+ def validate_time_zone(self, tz):
+ try:
+ timezone.now().astimezone(pytz.timezone(tz))
+ except pytz.exceptions.UnknownTimeZoneError:
+ raise BadRequest(detail="Invalid time zone")
+ return tz
+
+ def validate_shifts(self, shifts):
+        # Get team_id from the instance if it exists, otherwise from the initial data.
+        # Terraform sends an empty string instead of None; normalize it to None.
+ team_id = self.instance.team_id if self.instance else (self.initial_data.get("team_id") or None)
+ for shift in shifts:
+ if shift.team_id != team_id:
+ raise BadRequest(detail="Shifts must be assigned to the same team as the schedule")
+
+ return shifts
+
+ def to_internal_value(self, data):
+ if data.get("shifts", []) is None: # terraform case
+ data["shifts"] = []
+ result = super().to_internal_value(data)
+ return result
+
+
+class ScheduleCalendarUpdateSerializer(ScheduleCalendarSerializer):
+ time_zone = serializers.CharField(required=False)
+ team_id = TeamPrimaryKeyRelatedField(read_only=True, source="team")
+
+ class Meta:
+ model = OnCallScheduleCalendar
+ fields = [
+ "id",
+ "team_id",
+ "name",
+ "time_zone",
+ "slack",
+ "on_call_now",
+ "shifts",
+ "ical_url_overrides",
+ ]
+ extra_kwargs = {
+ "name": {"required": False},
+ "ical_url_overrides": {"required": False, "allow_null": True},
+ }
+
+ def update(self, instance, validated_data):
+ validated_data = self._correct_validated_data(validated_data)
+ new_time_zone = validated_data.get("time_zone", instance.time_zone)
+ new_shifts = validated_data.get("shifts", [])
+ existing_shifts = instance.custom_on_call_shifts.all()
+
+ ical_changed = False
+
+ if new_time_zone != instance.time_zone or set(existing_shifts) != set(new_shifts):
+ ical_changed = True
+ if (
+ "ical_url_overrides" in validated_data
+ and validated_data["ical_url_overrides"] != instance.ical_url_overrides
+ ):
+ ical_changed = True
+ if ical_changed:
+ drop_cached_ical_task.apply_async(
+ (instance.pk,),
+ )
+ schedule_notify_about_empty_shifts_in_schedule.apply_async((instance.pk,))
+ schedule_notify_about_gaps_in_schedule.apply_async((instance.pk,))
+ return super().update(instance, validated_data)
diff --git a/engine/apps/public_api/serializers/schedules_ical.py b/engine/apps/public_api/serializers/schedules_ical.py
new file mode 100644
index 0000000000..2f7b9b4ecd
--- /dev/null
+++ b/engine/apps/public_api/serializers/schedules_ical.py
@@ -0,0 +1,73 @@
+from apps.public_api.serializers.schedules_base import ScheduleBaseSerializer
+from apps.schedules.models import OnCallScheduleICal
+from apps.schedules.tasks import (
+ drop_cached_ical_task,
+ schedule_notify_about_empty_shifts_in_schedule,
+ schedule_notify_about_gaps_in_schedule,
+)
+from common.api_helpers.custom_fields import TeamPrimaryKeyRelatedField
+from common.api_helpers.utils import validate_ical_url
+
+
+class ScheduleICalSerializer(ScheduleBaseSerializer):
+ class Meta:
+ model = OnCallScheduleICal
+ fields = [
+ "id",
+ "team_id",
+ "name",
+ "ical_url_primary",
+ "ical_url_overrides",
+ "slack",
+ "on_call_now",
+ ]
+ extra_kwargs = {
+ "ical_url_primary": {"required": True, "allow_null": False},
+ "ical_url_overrides": {"required": False, "allow_null": True},
+ }
+
+ def validate_ical_url_primary(self, url):
+ return validate_ical_url(url)
+
+ def validate_ical_url_overrides(self, url):
+ return validate_ical_url(url)
+
+
+class ScheduleICalUpdateSerializer(ScheduleICalSerializer):
+ team_id = TeamPrimaryKeyRelatedField(read_only=True, source="team")
+
+ class Meta:
+ model = OnCallScheduleICal
+ fields = [
+ "id",
+ "team_id",
+ "name",
+ "ical_url_primary",
+ "ical_url_overrides",
+ "slack",
+ "on_call_now",
+ ]
+ extra_kwargs = {
+ "name": {"required": False},
+ "ical_url_primary": {"required": False, "allow_null": False},
+ "ical_url_overrides": {"required": False, "allow_null": True},
+ }
+
+ def update(self, instance, validated_data):
+ ical_changed = False
+ validated_data = self._correct_validated_data(validated_data)
+
+ if "ical_url_primary" in validated_data and validated_data["ical_url_primary"] != instance.ical_url_primary:
+ ical_changed = True
+ if (
+ "ical_url_overrides" in validated_data
+ and validated_data["ical_url_overrides"] != instance.ical_url_overrides
+ ):
+ ical_changed = True
+ if ical_changed:
+ drop_cached_ical_task.apply_async(
+ (instance.pk,),
+ )
+ schedule_notify_about_empty_shifts_in_schedule.apply_async((instance.pk,))
+ schedule_notify_about_gaps_in_schedule.apply_async((instance.pk,))
+ return super().update(instance, validated_data)
diff --git a/engine/apps/public_api/serializers/schedules_polymorphic.py b/engine/apps/public_api/serializers/schedules_polymorphic.py
new file mode 100644
index 0000000000..54ed910498
--- /dev/null
+++ b/engine/apps/public_api/serializers/schedules_polymorphic.py
@@ -0,0 +1,47 @@
+from rest_framework.fields import empty
+from rest_polymorphic.serializers import PolymorphicSerializer
+
+from apps.public_api.serializers.schedules_calendar import ScheduleCalendarSerializer, ScheduleCalendarUpdateSerializer
+from apps.public_api.serializers.schedules_ical import ScheduleICalSerializer, ScheduleICalUpdateSerializer
+from apps.schedules.models import OnCallScheduleCalendar, OnCallScheduleICal
+from common.api_helpers.mixins import EagerLoadingMixin
+
+
+class PolymorphicScheduleSerializer(EagerLoadingMixin, PolymorphicSerializer):
+ SELECT_RELATED = ["organization"]
+
+ resource_type_field_name = "type"
+
+ model_serializer_mapping = {
+ OnCallScheduleICal: ScheduleICalSerializer,
+ OnCallScheduleCalendar: ScheduleCalendarSerializer,
+ }
+
+ SCHEDULE_CLASS_TO_TYPE = {OnCallScheduleCalendar: "calendar", OnCallScheduleICal: "ical"}
+
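+    # The public "type" discriminator ("calendar" or "ical") comes from
+    # SCHEDULE_CLASS_TO_TYPE, while model_serializer_mapping selects the
+    # serializer for each schedule model.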
+ def to_resource_type(self, model_or_instance):
+ return self.SCHEDULE_CLASS_TO_TYPE.get(model_or_instance._meta.model)
+
+
+class PolymorphicScheduleUpdateSerializer(PolymorphicScheduleSerializer):
+ model_serializer_mapping = {
+ OnCallScheduleICal: ScheduleICalUpdateSerializer,
+ OnCallScheduleCalendar: ScheduleCalendarUpdateSerializer,
+ }
+
+ def update(self, instance, validated_data):
+ """Overridden method of PolymorphicSerializer, here we get serializer from instance instead of validated data"""
+ serializer = self._get_serializer_from_model_or_instance(instance)
+ return serializer.update(instance, validated_data)
+
+ def to_internal_value(self, data):
+ """Overridden method of PolymorphicSerializer, here we get serializer from instance instead of data"""
+ serializer = self._get_serializer_from_model_or_instance(self.instance)
+ ret = serializer.to_internal_value(data)
+ return ret
+
+ def run_validation(self, data=empty):
+ """Overridden method of PolymorphicSerializer, here we get serializer from instance instead of data"""
+ serializer = self._get_serializer_from_model_or_instance(self.instance)
+ validated_data = serializer.run_validation(data)
+ return validated_data
diff --git a/engine/apps/public_api/serializers/slack_channel.py b/engine/apps/public_api/serializers/slack_channel.py
new file mode 100644
index 0000000000..a350a6d7f3
--- /dev/null
+++ b/engine/apps/public_api/serializers/slack_channel.py
@@ -0,0 +1,9 @@
+from rest_framework import serializers
+
+from apps.slack.models import SlackChannel
+
+
+class SlackChannelSerializer(serializers.ModelSerializer):
+ class Meta:
+ model = SlackChannel
+ fields = ["name", "slack_id"]
diff --git a/engine/apps/public_api/serializers/teams.py b/engine/apps/public_api/serializers/teams.py
new file mode 100644
index 0000000000..04c36c0088
--- /dev/null
+++ b/engine/apps/public_api/serializers/teams.py
@@ -0,0 +1,11 @@
+from rest_framework import serializers
+
+from apps.user_management.models import Team
+
+
+class TeamSerializer(serializers.ModelSerializer):
+ id = serializers.CharField(read_only=True, source="public_primary_key")
+
+ class Meta:
+ model = Team
+ fields = ("id", "name", "email", "avatar_url")
diff --git a/engine/apps/public_api/serializers/user_groups.py b/engine/apps/public_api/serializers/user_groups.py
new file mode 100644
index 0000000000..a5bde7b3e4
--- /dev/null
+++ b/engine/apps/public_api/serializers/user_groups.py
@@ -0,0 +1,27 @@
+from rest_framework import serializers
+
+from apps.slack.models import SlackUserGroup
+
+
+class UserGroupSerializer(serializers.ModelSerializer):
+ id = serializers.CharField(read_only=True, source="public_primary_key")
+ type = serializers.SerializerMethodField(read_only=True)
+ slack = serializers.SerializerMethodField(read_only=True)
+
+ class Meta:
+ model = SlackUserGroup
+ fields = [
+ "id",
+ "type",
+ "slack",
+ ]
+
+ def get_type(self, obj):
+ return "slack_based" # change when another group types will be able
+
+ def get_slack(self, obj):
+ return {
+ "id": obj.slack_id,
+ "name": obj.name,
+ "handle": obj.handle,
+ }
diff --git a/engine/apps/public_api/serializers/users.py b/engine/apps/public_api/serializers/users.py
new file mode 100644
index 0000000000..8afdedafe2
--- /dev/null
+++ b/engine/apps/public_api/serializers/users.py
@@ -0,0 +1,60 @@
+from rest_framework import serializers
+
+from apps.slack.models import SlackUserIdentity
+from apps.user_management.models import User
+from common.api_helpers.mixins import EagerLoadingMixin
+from common.constants.role import Role
+
+
+class SlackUserIdentitySerializer(serializers.ModelSerializer):
+ user_id = serializers.CharField(source="slack_id")
+ team_id = serializers.CharField(source="slack_team_identity.slack_id")
+
+ class Meta:
+ model = SlackUserIdentity
+ fields = (
+ "user_id",
+ "team_id",
+ )
+
+
+class FastUserSerializer(serializers.ModelSerializer):
+    id = serializers.ReadOnlyField(source="public_primary_key")
+ email = serializers.EmailField(read_only=True)
+ role = serializers.SerializerMethodField()
+ is_phone_number_verified = serializers.SerializerMethodField()
+
+ class Meta:
+ model = User
+ fields = ["id", "email", "username", "role", "is_phone_number_verified"]
+
+ @staticmethod
+ def get_role(obj):
+ return Role(obj.role).name.lower()
+
+ def get_is_phone_number_verified(self, obj):
+ return obj.verified_phone_number is not None
+
+
+class UserSerializer(serializers.ModelSerializer, EagerLoadingMixin):
+    id = serializers.ReadOnlyField(source="public_primary_key")
+ email = serializers.EmailField(read_only=True)
+ role = serializers.SerializerMethodField()
+ slack = SlackUserIdentitySerializer(read_only=True, source="slack_user_identity")
+ is_phone_number_verified = serializers.SerializerMethodField()
+
+ SELECT_RELATED = [
+ "slack_user_identity",
+ "slack_user_identity__slack_team_identity",
+ ]
+
+ class Meta:
+ model = User
+ fields = ["id", "email", "slack", "username", "role", "is_phone_number_verified"]
+
+ @staticmethod
+ def get_role(obj):
+ return Role(obj.role).name.lower()
+
+ def get_is_phone_number_verified(self, obj):
+ return obj.verified_phone_number is not None
diff --git a/engine/apps/public_api/tests/__init__.py b/engine/apps/public_api/tests/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/engine/apps/public_api/tests/conftest.py b/engine/apps/public_api/tests/conftest.py
new file mode 100644
index 0000000000..a4d11c266e
--- /dev/null
+++ b/engine/apps/public_api/tests/conftest.py
@@ -0,0 +1,243 @@
+import pytest
+from django.utils import dateparse, timezone
+from pytest_factoryboy import register
+
+from apps.alerts.models import EscalationPolicy, ResolutionNote
+from apps.auth_token.models import ApiAuthToken
+from apps.base.models import UserNotificationPolicy
+from apps.public_api import constants as public_api_constants
+from apps.schedules.models import CustomOnCallShift, OnCallScheduleCalendar, OnCallScheduleICal
+from apps.user_management.tests.factories import OrganizationFactory, UserFactory
+from common.constants.role import Role
+
+register(UserFactory)
+register(OrganizationFactory)
+
+
+@pytest.fixture()
+def make_organization_and_user_with_token(make_organization_and_user, make_public_api_token):
+ def _make_organization_and_user_with_token():
+ organization, user = make_organization_and_user()
+ _, token = make_public_api_token(user, organization)
+ return organization, user, token
+
+ return _make_organization_and_user_with_token
+
+
+@pytest.fixture()
+def make_organization_and_user_with_slack_identities_for_demo_token(
+ make_slack_team_identity,
+ make_organization,
+ make_slack_user_identity,
+ make_user,
+):
+ def _make_organization_and_user_with_slack_identities_for_demo_token():
+ slack_team_identity = make_slack_team_identity(slack_id=public_api_constants.DEMO_SLACK_TEAM_ID)
+ organization = make_organization(
+ slack_team_identity=slack_team_identity, public_primary_key=public_api_constants.DEMO_ORGANIZATION_ID
+ )
+ slack_user_identity = make_slack_user_identity(
+ slack_id=public_api_constants.DEMO_SLACK_USER_ID,
+ slack_team_identity=slack_team_identity,
+ )
+ user = make_user(
+ organization=organization,
+ public_primary_key=public_api_constants.DEMO_USER_ID,
+ email=public_api_constants.DEMO_USER_EMAIL,
+ username=public_api_constants.DEMO_USER_USERNAME,
+ role=Role.ADMIN,
+ slack_user_identity=slack_user_identity,
+ )
+ ApiAuthToken.create_auth_token(user, organization, public_api_constants.DEMO_AUTH_TOKEN)
+ token = public_api_constants.DEMO_AUTH_TOKEN
+ return organization, user, token
+
+ return _make_organization_and_user_with_slack_identities_for_demo_token
+
+
+@pytest.fixture()
+def make_data_for_demo_token(
+ make_alert_receive_channel,
+ make_channel_filter,
+ make_escalation_chain,
+ make_escalation_policy,
+ make_alert_group,
+ make_alert,
+ make_resolution_note,
+ make_custom_action,
+ make_slack_user_group,
+ make_schedule,
+ make_on_call_shift,
+ make_slack_channel,
+ make_user_notification_policy,
+):
+ def _make_data_for_demo_token(organization, user):
+ alert_receive_channel = make_alert_receive_channel(
+ organization,
+ public_primary_key=public_api_constants.DEMO_INTEGRATION_ID,
+ verbal_name=public_api_constants.DEMO_INTEGRATION_NAME,
+ )
+ route_1 = make_channel_filter(
+ public_primary_key=public_api_constants.DEMO_ROUTE_ID_1,
+ alert_receive_channel=alert_receive_channel,
+ slack_channel_id=public_api_constants.DEMO_SLACK_CHANNEL_FOR_ROUTE_ID,
+ filtering_term="us-(east|west)",
+ order=0,
+ )
+ make_channel_filter(
+ public_primary_key=public_api_constants.DEMO_ROUTE_ID_2,
+ alert_receive_channel=alert_receive_channel,
+ slack_channel_id=public_api_constants.DEMO_SLACK_CHANNEL_FOR_ROUTE_ID,
+ filtering_term=".*",
+ order=1,
+ is_default=True,
+ )
+ escalation_chain = make_escalation_chain(
+ organization, public_primary_key=public_api_constants.DEMO_ESCALATION_CHAIN_ID
+ )
+ make_escalation_policy(
+ escalation_chain,
+ public_primary_key=public_api_constants.DEMO_ESCALATION_POLICY_ID_1,
+ escalation_policy_step=EscalationPolicy.STEP_WAIT,
+ order=0,
+ wait_delay=EscalationPolicy.ONE_MINUTE,
+ )
+ escalation_policy_2 = make_escalation_policy(
+ escalation_chain,
+ public_primary_key=public_api_constants.DEMO_ESCALATION_POLICY_ID_2,
+ escalation_policy_step=EscalationPolicy.STEP_NOTIFY_USERS_QUEUE,
+ order=1,
+ )
+ escalation_policy_2.notify_to_users_queue.add(user)
+ alert_group = make_alert_group(
+ alert_receive_channel,
+ public_primary_key=public_api_constants.DEMO_INCIDENT_ID,
+ resolved=True,
+ channel_filter=route_1,
+ )
+ alert_group.started_at = dateparse.parse_datetime(public_api_constants.DEMO_INCIDENT_CREATED_AT)
+ alert_group.resolved_at = dateparse.parse_datetime(public_api_constants.DEMO_INCIDENT_RESOLVED_AT)
+ alert_group.save(update_fields=["started_at", "resolved_at"])
+ for alert_id, created_at in public_api_constants.DEMO_ALERT_IDS:
+ alert = make_alert(
+ public_primary_key=alert_id,
+ alert_group=alert_group,
+ raw_request_data=public_api_constants.DEMO_ALERT_PAYLOAD,
+ )
+ alert.created_at = dateparse.parse_datetime(created_at)
+ alert.save(update_fields=["created_at"])
+
+ resolution_note = make_resolution_note(
+ alert_group=alert_group,
+ source=ResolutionNote.Source.WEB,
+ author=user,
+ public_primary_key=public_api_constants.DEMO_RESOLUTION_NOTE_ID,
+ message_text=public_api_constants.DEMO_RESOLUTION_NOTE_TEXT,
+ )
+ resolution_note.created_at = dateparse.parse_datetime(public_api_constants.DEMO_RESOLUTION_NOTE_CREATED_AT)
+ resolution_note.save(update_fields=["created_at"])
+
+ make_custom_action(
+ public_primary_key=public_api_constants.DEMO_CUSTOM_ACTION_ID,
+ organization=organization,
+ name=public_api_constants.DEMO_CUSTOM_ACTION_NAME,
+ )
+
+ user_group = make_slack_user_group(
+ public_primary_key=public_api_constants.DEMO_SLACK_USER_GROUP_ID,
+ name=public_api_constants.DEMO_SLACK_USER_GROUP_NAME,
+ handle=public_api_constants.DEMO_SLACK_USER_GROUP_HANDLE,
+ slack_id=public_api_constants.DEMO_SLACK_USER_GROUP_SLACK_ID,
+ slack_team_identity=organization.slack_team_identity,
+ )
+
+ # ical schedule
+ make_schedule(
+ organization=organization,
+ schedule_class=OnCallScheduleICal,
+ public_primary_key=public_api_constants.DEMO_SCHEDULE_ID_ICAL,
+ ical_url_primary=public_api_constants.DEMO_SCHEDULE_ICAL_URL_PRIMARY,
+ ical_url_overrides=public_api_constants.DEMO_SCHEDULE_ICAL_URL_OVERRIDES,
+ name=public_api_constants.DEMO_SCHEDULE_NAME_ICAL,
+ channel=public_api_constants.DEMO_SLACK_CHANNEL_SLACK_ID,
+ user_group=user_group,
+ )
+ # calendar schedule
+ schedule_calendar = make_schedule(
+ organization=organization,
+ schedule_class=OnCallScheduleCalendar,
+ public_primary_key=public_api_constants.DEMO_SCHEDULE_ID_CALENDAR,
+ name=public_api_constants.DEMO_SCHEDULE_NAME_CALENDAR,
+ channel=public_api_constants.DEMO_SLACK_CHANNEL_SLACK_ID,
+ user_group=user_group,
+ time_zone="America/New_york",
+ )
+
+ on_call_shift_1 = make_on_call_shift(
+ shift_type=CustomOnCallShift.TYPE_SINGLE_EVENT,
+ organization=organization,
+ public_primary_key=public_api_constants.DEMO_ON_CALL_SHIFT_ID_1,
+ name=public_api_constants.DEMO_ON_CALL_SHIFT_NAME_1,
+ start=dateparse.parse_datetime(public_api_constants.DEMO_ON_CALL_SHIFT_START_1),
+ duration=timezone.timedelta(seconds=public_api_constants.DEMO_ON_CALL_SHIFT_DURATION),
+ )
+ on_call_shift_1.users.add(user)
+
+ on_call_shift_2 = make_on_call_shift(
+ shift_type=CustomOnCallShift.TYPE_RECURRENT_EVENT,
+ organization=organization,
+ public_primary_key=public_api_constants.DEMO_ON_CALL_SHIFT_ID_2,
+ name=public_api_constants.DEMO_ON_CALL_SHIFT_NAME_2,
+ start=dateparse.parse_datetime(public_api_constants.DEMO_ON_CALL_SHIFT_START_2),
+ duration=timezone.timedelta(seconds=public_api_constants.DEMO_ON_CALL_SHIFT_DURATION),
+ frequency=CustomOnCallShift.FREQUENCY_WEEKLY,
+ interval=2,
+ by_day=public_api_constants.DEMO_ON_CALL_SHIFT_BY_DAY,
+ source=CustomOnCallShift.SOURCE_TERRAFORM,
+ )
+ on_call_shift_2.users.add(user)
+
+ schedule_calendar.custom_on_call_shifts.add(on_call_shift_1)
+ schedule_calendar.custom_on_call_shifts.add(on_call_shift_2)
+
+ make_slack_channel(
+ organization.slack_team_identity,
+ slack_id=public_api_constants.DEMO_SLACK_CHANNEL_SLACK_ID,
+ name=public_api_constants.DEMO_SLACK_CHANNEL_NAME,
+ )
+ make_user_notification_policy(
+ public_primary_key=public_api_constants.DEMO_PERSONAL_NOTIFICATION_ID_1,
+ important=False,
+ user=user,
+ notify_by=UserNotificationPolicy.NotificationChannel.SMS,
+ step=UserNotificationPolicy.Step.NOTIFY,
+ order=0,
+ )
+ make_user_notification_policy(
+ public_primary_key=public_api_constants.DEMO_PERSONAL_NOTIFICATION_ID_2,
+ important=False,
+ user=user,
+ step=UserNotificationPolicy.Step.WAIT,
+ wait_delay=UserNotificationPolicy.FIVE_MINUTES,
+ order=1,
+ )
+ make_user_notification_policy(
+ public_primary_key=public_api_constants.DEMO_PERSONAL_NOTIFICATION_ID_3,
+ important=False,
+ user=user,
+ step=UserNotificationPolicy.Step.NOTIFY,
+ notify_by=UserNotificationPolicy.NotificationChannel.PHONE_CALL,
+ order=2,
+ )
+
+ make_user_notification_policy(
+ public_primary_key=public_api_constants.DEMO_PERSONAL_NOTIFICATION_ID_4,
+ important=True,
+ user=user,
+ step=UserNotificationPolicy.Step.NOTIFY,
+ notify_by=UserNotificationPolicy.NotificationChannel.PHONE_CALL,
+ order=0,
+ )
+ return
+
+ return _make_data_for_demo_token
diff --git a/engine/apps/public_api/tests/test_alerts.py b/engine/apps/public_api/tests/test_alerts.py
new file mode 100644
index 0000000000..c6e3994b1e
--- /dev/null
+++ b/engine/apps/public_api/tests/test_alerts.py
@@ -0,0 +1,172 @@
+import pytest
+from django.urls import reverse
+from rest_framework import status
+from rest_framework.test import APIClient
+
+alert_raw_request_data = {
+ "evalMatches": [
+ {"value": 100, "metric": "High value", "tags": None},
+ {"value": 200, "metric": "Higher Value", "tags": None},
+ ],
+ "message": "Someone is testing the alert notification within grafana.",
+ "ruleId": 0,
+ "ruleName": "Test notification",
+ "ruleUrl": "http://localhost:3000/",
+ "state": "alerting",
+ "title": "[Alerting] Test notification",
+}
+
+
+@pytest.fixture()
+def alert_public_api_setup(
+ make_organization,
+ make_alert_receive_channel,
+ make_channel_filter,
+):
+ organization = make_organization()
+ alert_receive_channel = make_alert_receive_channel(organization)
+ default_channel_filter = make_channel_filter(alert_receive_channel, is_default=True)
+ return organization, alert_receive_channel, default_channel_filter
+
+
+@pytest.mark.django_db
+def test_get_list_alerts(
+ alert_public_api_setup,
+ make_user_for_organization,
+ make_public_api_token,
+ make_alert_group,
+ make_alert,
+):
+ # https://api-docs.amixr.io/#list-alerts
+ organization, alert_receive_channel, default_channel_filter = alert_public_api_setup
+ alert_group = make_alert_group(alert_receive_channel)
+ alert = make_alert(alert_group, alert_raw_request_data)
+ admin = make_user_for_organization(organization)
+ _, token = make_public_api_token(admin, organization)
+
+ client = APIClient()
+
+ url = reverse("api-public:alerts-list")
+ response = client.get(url, HTTP_AUTHORIZATION=f"{token}")
+
+ expected_response = {
+ "count": 1,
+ "next": None,
+ "previous": None,
+ "results": [
+ {
+ "id": alert.public_primary_key,
+ "alert_group_id": alert_group.public_primary_key,
+ "created_at": alert.created_at.isoformat().replace("+00:00", "Z"),
+ "payload": {
+ "state": "alerting",
+ "title": "[Alerting] Test notification",
+ "ruleId": 0,
+ "message": "Someone is testing the alert notification within grafana.",
+ "ruleUrl": "http://localhost:3000/",
+ "ruleName": "Test notification",
+ "evalMatches": [
+ {"tags": None, "value": 100, "metric": "High value"},
+ {"tags": None, "value": 200, "metric": "Higher Value"},
+ ],
+ },
+ }
+ ],
+ }
+ assert response.status_code == status.HTTP_200_OK
+ assert response.json() == expected_response
+
+
+@pytest.mark.django_db
+def test_get_list_alerts_filter_by_incident(
+ alert_public_api_setup,
+ make_user_for_organization,
+ make_public_api_token,
+ make_alert_group,
+ make_alert,
+):
+ # https://api-docs.amixr.io/#list-alerts
+ organization, alert_receive_channel, default_channel_filter = alert_public_api_setup
+ alert_group = make_alert_group(alert_receive_channel)
+ make_alert(alert_group, alert_raw_request_data)
+ admin = make_user_for_organization(organization)
+ _, token = make_public_api_token(admin, organization)
+
+ client = APIClient()
+
+ url = reverse("api-public:alerts-list")
+ response = client.get(
+ url + f"?alert_group_id={alert_group.public_primary_key}", format="json", HTTP_AUTHORIZATION=f"{token}"
+ )
+
+ assert response.status_code == status.HTTP_200_OK
+ assert response.json()["count"] == 1
+
+
+@pytest.mark.django_db
+def test_get_list_alerts_filter_by_non_existing_incident(
+ alert_public_api_setup,
+ make_user_for_organization,
+ make_public_api_token,
+ make_alert_group,
+ make_alert,
+):
+ organization, alert_receive_channel, default_channel_filter = alert_public_api_setup
+ alert_group = make_alert_group(alert_receive_channel)
+ make_alert(alert_group, alert_raw_request_data)
+ admin = make_user_for_organization(organization)
+ _, token = make_public_api_token(admin, organization)
+
+ client = APIClient()
+
+ url = reverse("api-public:alerts-list")
+ response = client.get(url + "?alert_group_id=invalid_alert_group_id", format="json", HTTP_AUTHORIZATION=f"{token}")
+
+ assert response.status_code == status.HTTP_200_OK
+ assert response.json()["count"] == 0
+
+
+@pytest.mark.django_db
+def test_alerts_search(
+ alert_public_api_setup,
+ make_user_for_organization,
+ make_public_api_token,
+ make_alert_group,
+ make_alert,
+):
+ organization, alert_receive_channel, default_channel_filter = alert_public_api_setup
+ alert_group = make_alert_group(alert_receive_channel)
+ make_alert(alert_group, alert_raw_request_data)
+ admin = make_user_for_organization(organization)
+ _, token = make_public_api_token(admin, organization)
+
+ client = APIClient()
+
+ url = reverse("api-public:alerts-list")
+ response = client.get(url + "?search=evalMatches", format="json", HTTP_AUTHORIZATION=f"{token}")
+
+ assert response.status_code == status.HTTP_200_OK
+ assert response.json()["count"] == 1
+
+
+@pytest.mark.django_db
+def test_alerts_search_with_no_results(
+ alert_public_api_setup,
+ make_user_for_organization,
+ make_public_api_token,
+ make_alert_group,
+ make_alert,
+):
+ organization, alert_receive_channel, default_channel_filter = alert_public_api_setup
+ alert_group = make_alert_group(alert_receive_channel)
+ make_alert(alert_group, alert_raw_request_data)
+ admin = make_user_for_organization(organization)
+ _, token = make_public_api_token(admin, organization)
+
+ client = APIClient()
+
+ url = reverse("api-public:alerts-list")
+ response = client.get(url + "?search=impossible payload", format="json", HTTP_AUTHORIZATION=f"{token}")
+
+ assert response.status_code == status.HTTP_200_OK
+ assert response.json()["count"] == 0
diff --git a/engine/apps/public_api/tests/test_custom_actions.py b/engine/apps/public_api/tests/test_custom_actions.py
new file mode 100644
index 0000000000..2fc39f9231
--- /dev/null
+++ b/engine/apps/public_api/tests/test_custom_actions.py
@@ -0,0 +1,89 @@
+import pytest
+from django.urls import reverse
+from rest_framework import status
+from rest_framework.test import APIClient
+
+
+@pytest.mark.django_db
+def test_get_custom_actions(
+ make_organization_and_user_with_token,
+ make_custom_action,
+):
+ organization, user, token = make_organization_and_user_with_token()
+ client = APIClient()
+
+ custom_action = make_custom_action(organization=organization)
+
+ url = reverse("api-public:actions-list")
+
+ response = client.get(url, format="json", HTTP_AUTHORIZATION=f"{token}")
+
+ expected_payload = {
+ "count": 1,
+ "next": None,
+ "previous": None,
+ "results": [
+ {
+ "id": custom_action.public_primary_key,
+ "name": custom_action.name,
+ "team_id": None,
+ }
+ ],
+ }
+
+ assert response.status_code == status.HTTP_200_OK
+ assert response.data == expected_payload
+
+
+@pytest.mark.django_db
+def test_get_custom_actions_filter_by_name(
+ make_organization_and_user_with_token,
+ make_custom_action,
+):
+ organization, user, token = make_organization_and_user_with_token()
+ client = APIClient()
+
+ custom_action = make_custom_action(organization=organization)
+ make_custom_action(organization=organization)
+ url = reverse("api-public:actions-list")
+
+ response = client.get(f"{url}?name={custom_action.name}", format="json", HTTP_AUTHORIZATION=f"{token}")
+
+ expected_payload = {
+ "count": 1,
+ "next": None,
+ "previous": None,
+ "results": [
+ {
+ "id": custom_action.public_primary_key,
+ "name": custom_action.name,
+ "team_id": None,
+ }
+ ],
+ }
+
+ assert response.status_code == status.HTTP_200_OK
+ assert response.data == expected_payload
+
+
+@pytest.mark.django_db
+def test_get_custom_actions_filter_by_name_empty_result(
+ make_organization_and_user_with_token,
+ make_custom_action,
+):
+
+ organization, user, token = make_organization_and_user_with_token()
+ client = APIClient()
+
+ make_custom_action(organization=organization)
+
+ url = reverse("api-public:actions-list")
+
+ response = client.get(f"{url}?name=NonExistentName", format="json", HTTP_AUTHORIZATION=f"{token}")
+
+ expected_payload = {"count": 0, "next": None, "previous": None, "results": []}
+
+ assert response.status_code == status.HTTP_200_OK
+ assert response.data == expected_payload
diff --git a/engine/apps/public_api/tests/test_demo_token/__init__.py b/engine/apps/public_api/tests/test_demo_token/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/engine/apps/public_api/tests/test_demo_token/test_alerts.py b/engine/apps/public_api/tests/test_demo_token/test_alerts.py
new file mode 100644
index 0000000000..4153ca2b89
--- /dev/null
+++ b/engine/apps/public_api/tests/test_demo_token/test_alerts.py
@@ -0,0 +1,110 @@
+import pytest
+from django.urls import reverse
+from rest_framework import status
+from rest_framework.test import APIClient
+
+from apps.public_api import constants as public_api_constants
+
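+# Canned alert payloads built from the demo-token constants; requests made with
+# the demo token are expected to return exactly this data.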
+demo_alerts_results = []
+for alert_id, created_at in public_api_constants.DEMO_ALERT_IDS:
+ demo_alerts_results.append(
+ {
+ "id": alert_id,
+ "alert_group_id": public_api_constants.DEMO_INCIDENT_ID,
+ "created_at": created_at,
+ "payload": {
+ "state": "alerting",
+ "title": "[Alerting] Test notification",
+ "ruleId": 0,
+ "message": "Someone is testing the alert notification within grafana.",
+ "ruleUrl": "https://amixr.io/",
+ "ruleName": "Test notification",
+ "evalMatches": [
+ {"tags": None, "value": 100, "metric": "High value"},
+ {"tags": None, "value": 200, "metric": "Higher Value"},
+ ],
+ },
+ }
+ )
+
+# https://api-docs.amixr.io/#list-alerts
+demo_alerts_payload = {"count": 3, "next": None, "previous": None, "results": demo_alerts_results}
+
+
+@pytest.mark.django_db
+def test_get_alerts(
+ make_organization_and_user_with_slack_identities_for_demo_token,
+ make_data_for_demo_token,
+):
+
+ organization, user, token = make_organization_and_user_with_slack_identities_for_demo_token()
+ client = APIClient()
+ _ = make_data_for_demo_token(organization, user)
+ url = reverse("api-public:alerts-list")
+ response = client.get(url, format="json", HTTP_AUTHORIZATION=token)
+
+ assert response.status_code == status.HTTP_200_OK
+ assert response.json() == demo_alerts_payload
+
+
+@pytest.mark.django_db
+def test_get_alerts_filter_by_incident(
+ make_organization_and_user_with_slack_identities_for_demo_token,
+ make_data_for_demo_token,
+):
+
+ organization, user, token = make_organization_and_user_with_slack_identities_for_demo_token()
+ client = APIClient()
+ _ = make_data_for_demo_token(organization, user)
+ url = reverse("api-public:alerts-list")
+ response = client.get(
+ url + f"?alert_group_id={public_api_constants.DEMO_INCIDENT_ID}", format="json", HTTP_AUTHORIZATION=token
+ )
+
+ assert response.status_code == status.HTTP_200_OK
+ assert response.json() == demo_alerts_payload
+
+
+@pytest.mark.django_db
+def test_get_alerts_filter_by_incident_no_results(
+ make_organization_and_user_with_slack_identities_for_demo_token,
+ make_data_for_demo_token,
+):
+
+ organization, user, token = make_organization_and_user_with_slack_identities_for_demo_token()
+ client = APIClient()
+ _ = make_data_for_demo_token(organization, user)
+ url = reverse("api-public:alerts-list")
+ response = client.get(url + "?alert_group_id=impossible_alert_group_id", format="json", HTTP_AUTHORIZATION=token)
+ assert response.status_code == status.HTTP_200_OK
+ assert response.data["results"] == []
+
+
+@pytest.mark.django_db
+def test_get_alerts_search(
+ make_organization_and_user_with_slack_identities_for_demo_token,
+ make_data_for_demo_token,
+):
+
+ organization, user, token = make_organization_and_user_with_slack_identities_for_demo_token()
+ client = APIClient()
+ _ = make_data_for_demo_token(organization, user)
+ url = reverse("api-public:alerts-list")
+ response = client.get(url + "?search=evalMatches", format="json", HTTP_AUTHORIZATION=token)
+
+ assert response.status_code == status.HTTP_200_OK
+ assert response.json() == demo_alerts_payload
+
+
+@pytest.mark.django_db
+def test_get_alerts_search_no_results(
+ make_organization_and_user_with_slack_identities_for_demo_token,
+ make_data_for_demo_token,
+):
+ organization, user, token = make_organization_and_user_with_slack_identities_for_demo_token()
+ client = APIClient()
+ _ = make_data_for_demo_token(organization, user)
+ url = reverse("api-public:alerts-list")
+ response = client.get(url + "?search=impossible_payload", format="json", HTTP_AUTHORIZATION=token)
+ assert response.status_code == status.HTTP_200_OK
+ assert response.data["results"] == []
diff --git a/engine/apps/public_api/tests/test_demo_token/test_custom_actions.py b/engine/apps/public_api/tests/test_demo_token/test_custom_actions.py
new file mode 100644
index 0000000000..6cf21903d6
--- /dev/null
+++ b/engine/apps/public_api/tests/test_demo_token/test_custom_actions.py
@@ -0,0 +1,32 @@
+import pytest
+from django.urls import reverse
+from rest_framework import status
+from rest_framework.test import APIClient
+
+from apps.public_api import constants as public_api_constants
+
+demo_custom_action_payload = {
+ "id": public_api_constants.DEMO_CUSTOM_ACTION_ID,
+ "name": public_api_constants.DEMO_CUSTOM_ACTION_NAME,
+ "team_id": None,
+}
+
+demo_custom_action_payload_list = {"count": 1, "next": None, "previous": None, "results": [demo_custom_action_payload]}
+
+
+@pytest.mark.django_db
+def test_demo_get_custom_actions_list(
+ make_organization_and_user_with_slack_identities_for_demo_token,
+ make_data_for_demo_token,
+):
+
+ organization, user, token = make_organization_and_user_with_slack_identities_for_demo_token()
+ client = APIClient()
+ _ = make_data_for_demo_token(organization, user)
+
+ url = reverse("api-public:actions-list")
+
+ response = client.get(url, format="json", HTTP_AUTHORIZATION=f"{token}")
+
+ assert response.status_code == status.HTTP_200_OK
+ assert response.data == demo_custom_action_payload_list
diff --git a/engine/apps/public_api/tests/test_demo_token/test_escalation_policies.py b/engine/apps/public_api/tests/test_demo_token/test_escalation_policies.py
new file mode 100644
index 0000000000..4df862b6d8
--- /dev/null
+++ b/engine/apps/public_api/tests/test_demo_token/test_escalation_policies.py
@@ -0,0 +1,169 @@
+import pytest
+from django.urls import reverse
+from django.utils import timezone
+from rest_framework import status
+from rest_framework.test import APIClient
+
+from apps.alerts.models import EscalationPolicy
+from apps.public_api import constants as public_api_constants
+
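+# Write requests made with the demo token are no-ops: they return success codes
+# but leave the demo escalation policies untouched.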
+# https://api-docs.amixr.io/#get-escalation-policy
+demo_escalation_policy_payload = {
+ "id": public_api_constants.DEMO_ESCALATION_POLICY_ID_1,
+ "escalation_chain_id": public_api_constants.DEMO_ESCALATION_CHAIN_ID,
+ "position": 0,
+ "type": "wait",
+ "duration": timezone.timedelta(seconds=60).seconds,
+}
+
+# https://api-docs.amixr.io/#list-escalation-policies
+demo_escalation_policies_payload = {
+ "count": 2,
+ "next": None,
+ "previous": None,
+ "results": [
+ {
+ "id": public_api_constants.DEMO_ESCALATION_POLICY_ID_1,
+ "escalation_chain_id": public_api_constants.DEMO_ESCALATION_CHAIN_ID,
+ "position": 0,
+ "type": "wait",
+ "duration": timezone.timedelta(seconds=60).seconds,
+ },
+ {
+ "id": public_api_constants.DEMO_ESCALATION_POLICY_ID_2,
+ "escalation_chain_id": public_api_constants.DEMO_ESCALATION_CHAIN_ID,
+ "position": 1,
+ "type": "notify_person_next_each_time",
+ "persons_to_notify_next_each_time": ["U4DNY931HHJS5"],
+ },
+ ],
+}
+
+
+@pytest.mark.django_db
+def test_get_escalation_policies(
+ make_organization_and_user_with_slack_identities_for_demo_token,
+ make_data_for_demo_token,
+):
+ organization, user, token = make_organization_and_user_with_slack_identities_for_demo_token()
+
+ client = APIClient()
+ _ = make_data_for_demo_token(organization, user)
+ url = reverse("api-public:escalation_policies-list")
+ response = client.get(url, format="json", HTTP_AUTHORIZATION=token)
+
+ assert response.status_code == status.HTTP_200_OK
+ assert response.json() == demo_escalation_policies_payload
+
+
+@pytest.mark.django_db
+def test_get_escalation_policies_filter_by_route(
+ make_organization_and_user_with_slack_identities_for_demo_token,
+ make_data_for_demo_token,
+):
+ organization, user, token = make_organization_and_user_with_slack_identities_for_demo_token()
+
+ client = APIClient()
+ _ = make_data_for_demo_token(organization, user)
+ url = reverse("api-public:escalation_policies-list")
+ response = client.get(
+ url + f"?route_id={public_api_constants.DEMO_ROUTE_ID_1}", format="json", HTTP_AUTHORIZATION=token
+ )
+
+ assert response.status_code == status.HTTP_200_OK
+ assert response.json() == demo_escalation_policies_payload
+
+
+@pytest.mark.django_db
+def test_create_escalation_policy(
+ make_organization_and_user_with_slack_identities_for_demo_token,
+ make_data_for_demo_token,
+):
+ organization, user, token = make_organization_and_user_with_slack_identities_for_demo_token()
+
+ client = APIClient()
+ _ = make_data_for_demo_token(organization, user)
+ data_for_create = {
+ "escalation_chain_id": public_api_constants.DEMO_ESCALATION_CHAIN_ID,
+ "type": "notify_person_next_each_time",
+ "position": 0,
+ "persons_to_notify_next_each_time": [user.public_primary_key],
+ }
+ url = reverse("api-public:escalation_policies-list")
+ response = client.post(url, data=data_for_create, format="json", HTTP_AUTHORIZATION=token)
+
+ assert response.status_code == status.HTTP_201_CREATED
+ # check that nothing changed
+ assert response.json() == demo_escalation_policy_payload
+
+
+@pytest.mark.django_db
+def test_invalid_step_type(
+ make_organization_and_user_with_slack_identities_for_demo_token,
+ make_data_for_demo_token,
+):
+ organization, user, token = make_organization_and_user_with_slack_identities_for_demo_token()
+
+ client = APIClient()
+ _ = make_data_for_demo_token(organization, user)
+ data_for_create = {
+ "escalation_chain_id": public_api_constants.DEMO_ESCALATION_CHAIN_ID,
+ "type": "this_is_invalid_step_type", # invalid step type
+ "position": 0,
+ "persons_to_notify_next_each_time": [user.public_primary_key],
+ }
+ url = reverse("api-public:escalation_policies-list")
+ response = client.post(url, data=data_for_create, format="json", HTTP_AUTHORIZATION=token)
+
+ assert response.status_code == status.HTTP_201_CREATED
+ # check that nothing changed
+ assert response.json() == demo_escalation_policy_payload
+
+
+@pytest.mark.django_db
+def test_update_escalation_step(
+ make_organization_and_user_with_slack_identities_for_demo_token,
+ make_data_for_demo_token,
+):
+ organization, user, token = make_organization_and_user_with_slack_identities_for_demo_token()
+
+ client = APIClient()
+ _ = make_data_for_demo_token(organization, user)
+ data_for_update = {
+ "route_id": public_api_constants.DEMO_ROUTE_ID_1,
+ "type": "notify_person_next_each_time",
+ "position": 1,
+ "persons_to_notify_next_each_time": [user.public_primary_key],
+ }
+ url = reverse(
+ "api-public:escalation_policies-detail", kwargs={"pk": public_api_constants.DEMO_ESCALATION_POLICY_ID_1}
+ )
+ response = client.put(url, data=data_for_update, format="json", HTTP_AUTHORIZATION=token)
+
+ assert response.status_code == status.HTTP_200_OK
+ # check that nothing changed
+ assert response.json() == demo_escalation_policy_payload
+
+
+@pytest.mark.django_db
+def test_delete_escalation_policy(
+ make_organization_and_user_with_slack_identities_for_demo_token,
+ make_data_for_demo_token,
+):
+ organization, user, token = make_organization_and_user_with_slack_identities_for_demo_token()
+
+ client = APIClient()
+ _ = make_data_for_demo_token(organization, user)
+ escalation_policy = EscalationPolicy.objects.get(
+ public_primary_key=public_api_constants.DEMO_ESCALATION_POLICY_ID_1
+ )
+
+ url = reverse("api-public:escalation_policies-detail", args=[escalation_policy.public_primary_key])
+ response = client.delete(url, format="json", HTTP_AUTHORIZATION=token)
+
+ assert response.status_code == status.HTTP_204_NO_CONTENT
+ # check that nothing changed
+ escalation_policy.refresh_from_db()
+ assert escalation_policy is not None
diff --git a/engine/apps/public_api/tests/test_demo_token/test_incidents.py b/engine/apps/public_api/tests/test_demo_token/test_incidents.py
new file mode 100644
index 0000000000..26aa3b1a1c
--- /dev/null
+++ b/engine/apps/public_api/tests/test_demo_token/test_incidents.py
@@ -0,0 +1,82 @@
+import pytest
+from django.urls import reverse
+from rest_framework import status
+from rest_framework.test import APIClient
+
+from apps.alerts.models import AlertGroup
+from apps.public_api import constants as public_api_constants
+
+demo_incidents_payload = {
+ "count": 1,
+ "next": None,
+ "previous": None,
+ "results": [
+ {
+ "id": public_api_constants.DEMO_INCIDENT_ID,
+ "integration_id": public_api_constants.DEMO_INTEGRATION_ID,
+ "route_id": public_api_constants.DEMO_ROUTE_ID_1,
+ "alerts_count": 3,
+ "state": "resolved",
+ "created_at": public_api_constants.DEMO_INCIDENT_CREATED_AT,
+ "resolved_at": public_api_constants.DEMO_INCIDENT_RESOLVED_AT,
+ "acknowledged_at": None,
+ "title": None,
+ }
+ ],
+}
+
+
+@pytest.mark.django_db
+def test_create_incidents(
+ make_organization_and_user_with_slack_identities_for_demo_token,
+ make_data_for_demo_token,
+):
+ organization, user, token = make_organization_and_user_with_slack_identities_for_demo_token()
+
+ client = APIClient()
+ _ = make_data_for_demo_token(organization, user)
+ url = reverse("api-public:alert_groups-list")
+ response = client.post(url, format="json", HTTP_AUTHORIZATION=token)
+
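+ # alert groups cannot be created through the public API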
+ assert response.status_code == status.HTTP_405_METHOD_NOT_ALLOWED
+
+
+@pytest.mark.django_db
+def test_get_incidents(
+ make_organization_and_user_with_slack_identities_for_demo_token,
+ make_data_for_demo_token,
+):
+ organization, user, token = make_organization_and_user_with_slack_identities_for_demo_token()
+
+ client = APIClient()
+ _ = make_data_for_demo_token(organization, user)
+ url = reverse("api-public:alert_groups-list")
+ response = client.get(url, format="json", HTTP_AUTHORIZATION=token)
+
+ assert response.status_code == status.HTTP_200_OK
+ assert response.json() == demo_incidents_payload
+
+
+@pytest.mark.django_db
+def test_delete_incidents(
+ make_organization_and_user_with_slack_identities_for_demo_token,
+ make_data_for_demo_token,
+):
+ organization, user, token = make_organization_and_user_with_slack_identities_for_demo_token()
+
+ client = APIClient()
+ _ = make_data_for_demo_token(organization, user)
+ url = reverse("api-public:alert_groups-list")
+ incidents = AlertGroup.unarchived_objects.filter(public_primary_key=public_api_constants.DEMO_INCIDENT_ID)
+ total_count = incidents.count()
+ incident = incidents[0]
+ data = {
+ "mode": "delete",
+ }
+ response = client.delete(url + f"/{incident.public_primary_key}/", data, format="json", HTTP_AUTHORIZATION=token)
+ new_count = AlertGroup.unarchived_objects.filter(public_primary_key=public_api_constants.DEMO_INCIDENT_ID).count()
+
+ assert response.status_code == status.HTTP_204_NO_CONTENT
+ incident.refresh_from_db()
+ assert total_count == new_count
+ assert incident is not None
diff --git a/engine/apps/public_api/tests/test_demo_token/test_integrations.py b/engine/apps/public_api/tests/test_demo_token/test_integrations.py
new file mode 100644
index 0000000000..be06f3673f
--- /dev/null
+++ b/engine/apps/public_api/tests/test_demo_token/test_integrations.py
@@ -0,0 +1,239 @@
+from urllib.parse import urljoin
+
+import pytest
+from django.conf import settings
+from django.urls import reverse
+from rest_framework import status
+from rest_framework.test import APIClient
+
+from apps.alerts.models import AlertReceiveChannel
+from apps.public_api import constants as public_api_constants
+
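+# Demo-token create/update/delete calls succeed without modifying the demo integration.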
+# https://api-docs.amixr.io/#post-integration
+demo_integration_post_payload = {
+ "id": public_api_constants.DEMO_INTEGRATION_ID,
+ "team_id": None,
+ "name": "Grafana :blush:",
+ "link": urljoin(settings.BASE_URL, f"/integrations/v1/grafana/{public_api_constants.DEMO_INTEGRATION_LINK_TOKEN}/"),
+ "heartbeat": None,
+ "default_route": {
+ "escalation_chain_id": None,
+ "id": public_api_constants.DEMO_ROUTE_ID_2,
+ "slack": {"channel_id": public_api_constants.DEMO_SLACK_CHANNEL_FOR_ROUTE_ID},
+ },
+ "type": "grafana",
+ "templates": {
+ "grouping_key": None,
+ "resolve_signal": None,
+ "acknowledge_signal": None,
+ "slack": {"title": None, "message": None, "image_url": None},
+ "web": {"title": None, "message": None, "image_url": None},
+ "sms": {
+ "title": None,
+ },
+ "phone_call": {
+ "title": None,
+ },
+ "email": {
+ "title": None,
+ "message": None,
+ },
+ "telegram": {
+ "title": None,
+ "message": None,
+ "image_url": None,
+ },
+ },
+ "maintenance_mode": None,
+ "maintenance_started_at": None,
+ "maintenance_end_at": None,
+}
+
+# https://api-docs.amixr.io/#get-integration
+demo_integration_payload = {
+ "id": public_api_constants.DEMO_INTEGRATION_ID,
+ "team_id": None,
+ "name": "Grafana :blush:",
+ "link": urljoin(settings.BASE_URL, f"/integrations/v1/grafana/{public_api_constants.DEMO_INTEGRATION_LINK_TOKEN}/"),
+ "default_route": {
+ "escalation_chain_id": None,
+ "id": public_api_constants.DEMO_ROUTE_ID_2,
+ "slack": {"channel_id": public_api_constants.DEMO_SLACK_CHANNEL_FOR_ROUTE_ID},
+ },
+ "type": "grafana",
+ "heartbeat": None,
+ "templates": {
+ "grouping_key": None,
+ "resolve_signal": None,
+ "acknowledge_signal": None,
+ "slack": {"title": None, "message": None, "image_url": None},
+ "web": {"title": None, "message": None, "image_url": None},
+ "sms": {
+ "title": None,
+ },
+ "phone_call": {
+ "title": None,
+ },
+ "email": {
+ "title": None,
+ "message": None,
+ },
+ "telegram": {
+ "title": None,
+ "message": None,
+ "image_url": None,
+ },
+ },
+ "maintenance_mode": None,
+ "maintenance_started_at": None,
+ "maintenance_end_at": None,
+}
+
+# https://api-docs.amixr.io/#list-integrations
+demo_integrations_payload = {
+ "count": 1,
+ "next": None,
+ "previous": None,
+ "results": [
+ {
+ "id": public_api_constants.DEMO_INTEGRATION_ID,
+ "team_id": None,
+ "name": "Grafana :blush:",
+ "link": urljoin(
+ settings.BASE_URL, f"/integrations/v1/grafana/{public_api_constants.DEMO_INTEGRATION_LINK_TOKEN}/"
+ ),
+ "default_route": {
+ "escalation_chain_id": None,
+ "id": public_api_constants.DEMO_ROUTE_ID_2,
+ "slack": {"channel_id": public_api_constants.DEMO_SLACK_CHANNEL_FOR_ROUTE_ID},
+ },
+ "type": "grafana",
+ "heartbeat": None,
+ "templates": {
+ "grouping_key": None,
+ "resolve_signal": None,
+ "acknowledge_signal": None,
+ "slack": {
+ "title": None,
+ "message": None,
+ "image_url": None,
+ },
+ "web": {"title": None, "message": None, "image_url": None},
+ "sms": {
+ "title": None,
+ },
+ "phone_call": {
+ "title": None,
+ },
+ "email": {
+ "title": None,
+ "message": None,
+ },
+ "telegram": {
+ "title": None,
+ "message": None,
+ "image_url": None,
+ },
+ },
+ "maintenance_mode": None,
+ "maintenance_started_at": None,
+ "maintenance_end_at": None,
+ },
+ ],
+}
+
+
+@pytest.mark.django_db
+def test_get_integrations(
+ make_organization_and_user_with_slack_identities_for_demo_token,
+ make_data_for_demo_token,
+):
+ organization, user, token = make_organization_and_user_with_slack_identities_for_demo_token()
+
+ client = APIClient()
+ _ = make_data_for_demo_token(organization, user)
+ url = reverse("api-public:integrations-list")
+ response = client.get(url, format="json", HTTP_AUTHORIZATION=token)
+
+ assert response.status_code == status.HTTP_200_OK
+ assert response.json() == demo_integrations_payload
+
+
+@pytest.mark.django_db
+def test_create_integration(
+ make_organization_and_user_with_slack_identities_for_demo_token,
+ make_data_for_demo_token,
+):
+
+ organization, user, token = make_organization_and_user_with_slack_identities_for_demo_token()
+
+ client = APIClient()
+ _ = make_data_for_demo_token(organization, user)
+ data_for_create = {"type": "grafana"}
+ url = reverse("api-public:integrations-list")
+ response = client.post(url, data=data_for_create, format="json", HTTP_AUTHORIZATION=token)
+
+ assert response.status_code == status.HTTP_201_CREATED
+ # check that nothing changed
+ assert response.json() == demo_integration_post_payload
+
+
+@pytest.mark.django_db
+def test_update_integration(
+ make_organization_and_user_with_slack_identities_for_demo_token,
+ make_data_for_demo_token,
+):
+
+ organization, user, token = make_organization_and_user_with_slack_identities_for_demo_token()
+
+ client = APIClient()
+ _ = make_data_for_demo_token(organization, user)
+ integration = AlertReceiveChannel.objects.get(public_primary_key=public_api_constants.DEMO_INTEGRATION_ID)
+ data_for_update = {"name": "new_name"}
+ url = reverse("api-public:integrations-detail", args=[integration.public_primary_key])
+ response = client.put(url, data=data_for_update, format="json", HTTP_AUTHORIZATION=token)
+
+ integration.refresh_from_db()
+
+ assert response.status_code == status.HTTP_200_OK
+ # check that nothing changed
+ assert response.json() == demo_integration_payload
+
+
+@pytest.mark.django_db
+def test_invalid_integration_type(
+ make_organization_and_user_with_slack_identities_for_demo_token,
+ make_data_for_demo_token,
+):
+
+ organization, user, token = make_organization_and_user_with_slack_identities_for_demo_token()
+
+ client = APIClient()
+ _ = make_data_for_demo_token(organization, user)
+ data_for_create = {"type": "this_is_invalid_integration_type"}
+ url = reverse("api-public:integrations-list")
+ response = client.post(url, data=data_for_create, format="json", HTTP_AUTHORIZATION=token)
+ assert response.status_code == status.HTTP_201_CREATED
+ # check that nothing changed
+ assert response.json() == demo_integration_post_payload
+
+
+@pytest.mark.django_db
+def test_delete_integration(
+ make_organization_and_user_with_slack_identities_for_demo_token,
+ make_data_for_demo_token,
+):
+
+ organization, user, token = make_organization_and_user_with_slack_identities_for_demo_token()
+
+ client = APIClient()
+ _ = make_data_for_demo_token(organization, user)
+ integration = AlertReceiveChannel.objects.get(public_primary_key=public_api_constants.DEMO_INTEGRATION_ID)
+
+ url = reverse("api-public:integrations-detail", args=[integration.public_primary_key])
+ response = client.delete(url, format="json", HTTP_AUTHORIZATION=token)
+
+ assert response.status_code == status.HTTP_204_NO_CONTENT
+ # check that nothing changed
+ integration.refresh_from_db()
+ assert integration is not None
diff --git a/engine/apps/public_api/tests/test_demo_token/test_on_call_shift.py b/engine/apps/public_api/tests/test_demo_token/test_on_call_shift.py
new file mode 100644
index 0000000000..f4c4552d62
--- /dev/null
+++ b/engine/apps/public_api/tests/test_demo_token/test_on_call_shift.py
@@ -0,0 +1,172 @@
+import pytest
+from django.urls import reverse
+from django.utils import timezone
+from rest_framework import status
+from rest_framework.test import APIClient
+
+from apps.public_api import constants as public_api_constants
+from apps.schedules.models import CustomOnCallShift
+
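+# Expected demo on-call shift payloads; writes through the demo token return them unchanged.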
+demo_on_call_shift_payload_1 = {
+ "id": public_api_constants.DEMO_ON_CALL_SHIFT_ID_1,
+ "team_id": None,
+ "name": public_api_constants.DEMO_ON_CALL_SHIFT_NAME_1,
+ "type": "single_event",
+ "time_zone": None,
+ "level": 0,
+ "start": public_api_constants.DEMO_ON_CALL_SHIFT_START_1,
+ "duration": public_api_constants.DEMO_ON_CALL_SHIFT_DURATION,
+ "users": [public_api_constants.DEMO_USER_ID],
+}
+
+demo_on_call_shift_payload_2 = {
+ "id": public_api_constants.DEMO_ON_CALL_SHIFT_ID_2,
+ "team_id": None,
+ "name": public_api_constants.DEMO_ON_CALL_SHIFT_NAME_2,
+ "type": "recurrent_event",
+ "time_zone": None,
+ "level": 0,
+ "start": public_api_constants.DEMO_ON_CALL_SHIFT_START_2,
+ "duration": public_api_constants.DEMO_ON_CALL_SHIFT_DURATION,
+ "frequency": "weekly",
+ "interval": 2,
+ "week_start": "SU",
+ "users": [public_api_constants.DEMO_USER_ID],
+ "by_day": public_api_constants.DEMO_ON_CALL_SHIFT_BY_DAY,
+ "by_month": None,
+ "by_monthday": None,
+}
+
+demo_on_call_shift_payload_list = {
+ "count": 2,
+ "next": None,
+ "previous": None,
+ "results": [demo_on_call_shift_payload_1, demo_on_call_shift_payload_2],
+}
+
+
+@pytest.mark.django_db
+def test_demo_get_on_call_shift_list(
+ make_organization_and_user_with_slack_identities_for_demo_token,
+ make_data_for_demo_token,
+):
+
+ organization, user, token = make_organization_and_user_with_slack_identities_for_demo_token()
+
+ client = APIClient()
+ _ = make_data_for_demo_token(organization, user)
+
+ url = reverse("api-public:on_call_shifts-list")
+
+ response = client.get(url, format="json", HTTP_AUTHORIZATION=f"{token}")
+
+ assert response.status_code == status.HTTP_200_OK
+ assert response.data == demo_on_call_shift_payload_list
+
+
+@pytest.mark.django_db
+@pytest.mark.parametrize(
+ "demo_on_call_shift_id,payload",
+ [
+ (public_api_constants.DEMO_ON_CALL_SHIFT_ID_1, demo_on_call_shift_payload_1),
+ (public_api_constants.DEMO_ON_CALL_SHIFT_ID_2, demo_on_call_shift_payload_2),
+ ],
+)
+def test_demo_get_on_call_shift(
+ make_organization_and_user_with_slack_identities_for_demo_token,
+ make_data_for_demo_token,
+ demo_on_call_shift_id,
+ payload,
+):
+
+ organization, user, token = make_organization_and_user_with_slack_identities_for_demo_token()
+ client = APIClient()
+ _ = make_data_for_demo_token(organization, user)
+
+ url = reverse("api-public:on_call_shifts-detail", kwargs={"pk": demo_on_call_shift_id})
+
+ response = client.get(url, format="json", HTTP_AUTHORIZATION=f"{token}")
+
+ assert response.status_code == status.HTTP_200_OK
+ assert response.data == payload
+
+
+@pytest.mark.django_db
+def test_demo_post_on_call_shift(
+ make_organization_and_user_with_slack_identities_for_demo_token,
+ make_data_for_demo_token,
+):
+
+ organization, user, token = make_organization_and_user_with_slack_identities_for_demo_token()
+ client = APIClient()
+ _ = make_data_for_demo_token(organization, user)
+
+ url = reverse("api-public:on_call_shifts-list")
+
+ data = {
+ "schedule_id": public_api_constants.DEMO_SCHEDULE_ID_CALENDAR,
+ "name": "New demo shift",
+ "type": CustomOnCallShift.TYPE_SINGLE_EVENT,
+ "start": timezone.now().replace(tzinfo=None, microsecond=0).isoformat(),
+ "duration": 3600,
+ }
+
+ response = client.post(url, data=data, format="json", HTTP_AUTHORIZATION=f"{token}")
+
+ assert response.status_code == status.HTTP_201_CREATED
+ assert response.data == demo_on_call_shift_payload_1
+
+
+@pytest.mark.django_db
+@pytest.mark.parametrize(
+ "demo_on_call_shift_id,payload",
+ [
+ (public_api_constants.DEMO_ON_CALL_SHIFT_ID_1, demo_on_call_shift_payload_1),
+ (public_api_constants.DEMO_ON_CALL_SHIFT_ID_2, demo_on_call_shift_payload_2),
+ ],
+)
+def test_demo_update_on_call_shift(
+ make_organization_and_user_with_slack_identities_for_demo_token,
+ make_data_for_demo_token,
+ demo_on_call_shift_id,
+ payload,
+):
+
+ organization, user, token = make_organization_and_user_with_slack_identities_for_demo_token()
+ client = APIClient()
+ _ = make_data_for_demo_token(organization, user)
+
+ data = {"name": "Updated demo name"}
+
+ url = reverse("api-public:on_call_shifts-detail", kwargs={"pk": demo_on_call_shift_id})
+
+ response = client.put(url, data=data, format="json", HTTP_AUTHORIZATION=f"{token}")
+
+ assert response.status_code == status.HTTP_200_OK
+ assert response.data == payload
+
+
+@pytest.mark.django_db
+@pytest.mark.parametrize(
+ "demo_on_call_shift_id",
+ [
+ public_api_constants.DEMO_ON_CALL_SHIFT_ID_1,
+ public_api_constants.DEMO_ON_CALL_SHIFT_ID_2,
+ ],
+)
+def test_demo_delete_on_call_shift(
+ make_organization_and_user_with_slack_identities_for_demo_token,
+ make_data_for_demo_token,
+ demo_on_call_shift_id,
+):
+
+ organization, user, token = make_organization_and_user_with_slack_identities_for_demo_token()
+ client = APIClient()
+ _ = make_data_for_demo_token(organization, user)
+
+ url = reverse("api-public:on_call_shifts-detail", kwargs={"pk": demo_on_call_shift_id})
+
+ response = client.delete(url, format="json", HTTP_AUTHORIZATION=f"{token}")
+
+ assert response.status_code == status.HTTP_204_NO_CONTENT
+ assert CustomOnCallShift.objects.filter(public_primary_key=demo_on_call_shift_id).exists()
diff --git a/engine/apps/public_api/tests/test_demo_token/test_personal_notification_rules.py b/engine/apps/public_api/tests/test_demo_token/test_personal_notification_rules.py
new file mode 100644
index 0000000000..d0abf315b2
--- /dev/null
+++ b/engine/apps/public_api/tests/test_demo_token/test_personal_notification_rules.py
@@ -0,0 +1,225 @@
+import pytest
+from django.urls import reverse
+from django.utils import timezone
+from rest_framework import status
+from rest_framework.test import APIClient
+
+from apps.base.models import UserNotificationPolicy
+from apps.base.models.user_notification_policy import NotificationChannelPublicAPIOptions
+from apps.public_api import constants as public_api_constants
+
+TYPE_WAIT = "wait"
+
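+# Demo notification rules are effectively read-only for the demo token.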
+demo_personal_notification_rule_payload_1 = {
+ "id": public_api_constants.DEMO_PERSONAL_NOTIFICATION_ID_1,
+ "user_id": public_api_constants.DEMO_USER_ID,
+ "position": 0,
+ "important": False,
+ "type": "notify_by_sms",
+}
+
+demo_personal_notification_rule_payload_2 = {
+ "id": public_api_constants.DEMO_PERSONAL_NOTIFICATION_ID_2,
+ "user_id": public_api_constants.DEMO_USER_ID,
+ "position": 1,
+ "duration": timezone.timedelta(seconds=300).seconds,
+ "important": False,
+ "type": "wait",
+}
+
+demo_personal_notification_rule_payload_3 = {
+ "id": public_api_constants.DEMO_PERSONAL_NOTIFICATION_ID_3,
+ "user_id": public_api_constants.DEMO_USER_ID,
+ "position": 2,
+ "important": False,
+ "type": "notify_by_phone_call",
+}
+
+demo_personal_notification_rule_payload_4 = {
+ "id": public_api_constants.DEMO_PERSONAL_NOTIFICATION_ID_4,
+ "user_id": public_api_constants.DEMO_USER_ID,
+ "position": 0,
+ "important": True,
+ "type": "notify_by_phone_call",
+}
+
+demo_personal_notification_rules_payload = {
+ "count": 4,
+ "next": None,
+ "previous": None,
+ "results": [
+ demo_personal_notification_rule_payload_1,
+ demo_personal_notification_rule_payload_2,
+ demo_personal_notification_rule_payload_3,
+ demo_personal_notification_rule_payload_4,
+ ],
+}
+
+demo_personal_notification_rules_non_important_payload = {
+ "count": 3,
+ "next": None,
+ "previous": None,
+ "results": [
+ demo_personal_notification_rule_payload_1,
+ demo_personal_notification_rule_payload_2,
+ demo_personal_notification_rule_payload_3,
+ ],
+}
+
+demo_personal_notification_rules_important_payload = {
+ "count": 1,
+ "next": None,
+ "previous": None,
+ "results": [
+ demo_personal_notification_rule_payload_4,
+ ],
+}
+
+
+@pytest.mark.django_db
+def test_get_personal_notification_rule(
+ make_organization_and_user_with_slack_identities_for_demo_token,
+ make_data_for_demo_token,
+):
+ organization, user, token = make_organization_and_user_with_slack_identities_for_demo_token()
+ _ = make_data_for_demo_token(organization, user)
+
+ demo_personal_notification_rule_1 = UserNotificationPolicy.objects.get(
+ public_primary_key=public_api_constants.DEMO_PERSONAL_NOTIFICATION_ID_1
+ )
+ client = APIClient()
+
+ url = reverse(
+ "api-public:personal_notification_rules-detail",
+ kwargs={"pk": demo_personal_notification_rule_1.public_primary_key},
+ )
+ response = client.get(url, format="json", HTTP_AUTHORIZATION=token)
+
+ assert response.status_code == status.HTTP_200_OK
+ assert response.json() == demo_personal_notification_rule_payload_1
+
+
+@pytest.mark.django_db
+def test_get_personal_notification_rules_list(
+ make_organization_and_user_with_slack_identities_for_demo_token,
+ make_data_for_demo_token,
+):
+ organization, user, token = make_organization_and_user_with_slack_identities_for_demo_token()
+ _ = make_data_for_demo_token(organization, user)
+
+ client = APIClient()
+
+ url = reverse("api-public:personal_notification_rules-list")
+ response = client.get(url, format="json", HTTP_AUTHORIZATION=token)
+
+ assert response.status_code == status.HTTP_200_OK
+ assert response.json() == demo_personal_notification_rules_payload
+
+
+@pytest.mark.django_db
+def test_get_personal_notification_rules_list_important(
+ make_organization_and_user_with_slack_identities_for_demo_token,
+ make_data_for_demo_token,
+):
+ organization, user, token = make_organization_and_user_with_slack_identities_for_demo_token()
+ _ = make_data_for_demo_token(organization, user)
+ client = APIClient()
+
+ url = reverse("api-public:personal_notification_rules-list")
+ response = client.get(url + "?important=true", format="json", HTTP_AUTHORIZATION=token)
+
+ assert response.status_code == status.HTTP_200_OK
+ assert response.json() == demo_personal_notification_rules_important_payload
+
+
+@pytest.mark.django_db
+def test_get_personal_notification_rules_list_non_important(
+ make_organization_and_user_with_slack_identities_for_demo_token,
+ make_data_for_demo_token,
+):
+ organization, user, token = make_organization_and_user_with_slack_identities_for_demo_token()
+ _ = make_data_for_demo_token(organization, user)
+
+ client = APIClient()
+
+ url = reverse("api-public:personal_notification_rules-list")
+ response = client.get(url + "?important=false", format="json", HTTP_AUTHORIZATION=token)
+
+ assert response.status_code == status.HTTP_200_OK
+ assert response.json() == demo_personal_notification_rules_non_important_payload
+
+
+@pytest.mark.django_db
+def test_update_personal_notification_rule(
+ make_organization_and_user_with_slack_identities_for_demo_token,
+ make_data_for_demo_token,
+):
+ organization, user, token = make_organization_and_user_with_slack_identities_for_demo_token()
+ _ = make_data_for_demo_token(organization, user)
+ demo_personal_notification_rule_1 = UserNotificationPolicy.objects.get(
+ public_primary_key=public_api_constants.DEMO_PERSONAL_NOTIFICATION_ID_1
+ )
+ client = APIClient()
+
+ url = reverse(
+ "api-public:personal_notification_rules-detail",
+ kwargs={"pk": demo_personal_notification_rule_1.public_primary_key},
+ )
+
+ data_to_update = {
+ "type": NotificationChannelPublicAPIOptions.LABELS[UserNotificationPolicy.NotificationChannel.SLACK]
+ }
+ response = client.put(url, format="json", HTTP_AUTHORIZATION=token, data=data_to_update)
+
+ assert response.status_code == status.HTTP_200_OK
+ assert response.json() == demo_personal_notification_rule_payload_1
+ # check that nothing changed
+ demo_personal_notification_rule_1.refresh_from_db()
+ assert demo_personal_notification_rule_1.notify_by != UserNotificationPolicy.NotificationChannel.SLACK
+
+
+@pytest.mark.django_db
+def test_create_personal_notification_rule(
+ make_organization_and_user_with_slack_identities_for_demo_token,
+ make_data_for_demo_token,
+):
+ organization, user, token = make_organization_and_user_with_slack_identities_for_demo_token()
+ _ = make_data_for_demo_token(organization, user)
+ client = APIClient()
+
+ url = reverse("api-public:personal_notification_rules-list")
+ data_for_create = {
+ "user_id": user.public_primary_key,
+ "type": TYPE_WAIT,
+ "position": 1,
+ "duration": timezone.timedelta(seconds=300).seconds,
+ }
+ response = client.post(url, format="json", HTTP_AUTHORIZATION=token, data=data_for_create)
+
+ assert response.status_code == status.HTTP_201_CREATED
+ assert response.json() == demo_personal_notification_rule_payload_1
+
+
+@pytest.mark.django_db
+def test_delete_personal_notification_rule(
+ make_organization_and_user_with_slack_identities_for_demo_token,
+ make_data_for_demo_token,
+):
+ organization, user, token = make_organization_and_user_with_slack_identities_for_demo_token()
+ _ = make_data_for_demo_token(organization, user)
+ demo_personal_notification_rule_1 = UserNotificationPolicy.objects.get(
+ public_primary_key=public_api_constants.DEMO_PERSONAL_NOTIFICATION_ID_1
+ )
+ client = APIClient()
+
+ url = reverse(
+ "api-public:personal_notification_rules-detail",
+ kwargs={"pk": demo_personal_notification_rule_1.public_primary_key},
+ )
+
+ response = client.delete(url, format="json", HTTP_AUTHORIZATION=token)
+
+ assert response.status_code == status.HTTP_204_NO_CONTENT
+ # check that nothing changed
+ demo_personal_notification_rule_1.refresh_from_db()
+ assert demo_personal_notification_rule_1 is not None
diff --git a/engine/apps/public_api/tests/test_demo_token/test_resolution_notes.py b/engine/apps/public_api/tests/test_demo_token/test_resolution_notes.py
new file mode 100644
index 0000000000..888760e9d5
--- /dev/null
+++ b/engine/apps/public_api/tests/test_demo_token/test_resolution_notes.py
@@ -0,0 +1,117 @@
+import pytest
+from django.urls import reverse
+from rest_framework import status
+from rest_framework.test import APIClient
+
+from apps.alerts.models import ResolutionNote
+from apps.public_api import constants as public_api_constants
+
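+# Demo resolution note; demo-token writes respond with this payload without persisting anything.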
+demo_resolution_note_payload = {
+ "id": public_api_constants.DEMO_RESOLUTION_NOTE_ID,
+ "alert_group_id": public_api_constants.DEMO_INCIDENT_ID,
+ "author": public_api_constants.DEMO_USER_ID,
+ "source": public_api_constants.DEMO_RESOLUTION_NOTE_SOURCE,
+ "created_at": public_api_constants.DEMO_RESOLUTION_NOTE_CREATED_AT,
+ "text": public_api_constants.DEMO_RESOLUTION_NOTE_TEXT,
+}
+
+demo_resolution_note_payload_list = {
+ "count": 1,
+ "next": None,
+ "previous": None,
+ "results": [demo_resolution_note_payload],
+}
+
+
+@pytest.mark.django_db
+def test_demo_get_resolution_note_list(
+ make_organization_and_user_with_slack_identities_for_demo_token,
+ make_data_for_demo_token,
+):
+
+ organization, user, token = make_organization_and_user_with_slack_identities_for_demo_token()
+ client = APIClient()
+ _ = make_data_for_demo_token(organization, user)
+
+ url = reverse("api-public:resolution_notes-list")
+
+ response = client.get(url, format="json", HTTP_AUTHORIZATION=f"{token}")
+
+ assert response.status_code == status.HTTP_200_OK
+ assert response.data == demo_resolution_note_payload_list
+
+
+@pytest.mark.django_db
+def test_demo_get_resolution_note(
+ make_organization_and_user_with_slack_identities_for_demo_token,
+ make_data_for_demo_token,
+):
+
+ organization, user, token = make_organization_and_user_with_slack_identities_for_demo_token()
+ client = APIClient()
+ _ = make_data_for_demo_token(organization, user)
+
+ url = reverse("api-public:resolution_notes-detail", kwargs={"pk": public_api_constants.DEMO_RESOLUTION_NOTE_ID})
+
+ response = client.get(url, format="json", HTTP_AUTHORIZATION=f"{token}")
+
+ assert response.status_code == status.HTTP_200_OK
+ assert response.data == demo_resolution_note_payload
+
+
+@pytest.mark.django_db
+def test_demo_post_resolution_note(
+ make_organization_and_user_with_slack_identities_for_demo_token,
+ make_data_for_demo_token,
+):
+
+ organization, user, token = make_organization_and_user_with_slack_identities_for_demo_token()
+ client = APIClient()
+ _ = make_data_for_demo_token(organization, user)
+
+ url = reverse("api-public:resolution_notes-list")
+
+ data = {"alert_group_id": public_api_constants.DEMO_INCIDENT_ID, "text": "New demo text"}
+
+ response = client.post(url, data=data, format="json", HTTP_AUTHORIZATION=f"{token}")
+
+ assert response.status_code == status.HTTP_201_CREATED
+ assert response.data == demo_resolution_note_payload
+
+
+@pytest.mark.django_db
+def test_demo_update_resolution_note(
+ make_organization_and_user_with_slack_identities_for_demo_token,
+ make_data_for_demo_token,
+):
+
+ organization, user, token = make_organization_and_user_with_slack_identities_for_demo_token()
+ client = APIClient()
+ _ = make_data_for_demo_token(organization, user)
+
+ data = {"alert_group_id": public_api_constants.DEMO_INCIDENT_ID, "text": "Updated demo text"}
+
+ url = reverse("api-public:resolution_notes-detail", kwargs={"pk": public_api_constants.DEMO_RESOLUTION_NOTE_ID})
+
+ response = client.put(url, data=data, format="json", HTTP_AUTHORIZATION=f"{token}")
+
+ assert response.status_code == status.HTTP_200_OK
+ assert response.data == demo_resolution_note_payload
+
+
+@pytest.mark.django_db
+def test_demo_delete_resolution_note(
+ make_organization_and_user_with_slack_identities_for_demo_token,
+ make_data_for_demo_token,
+):
+
+ organization, user, token = make_organization_and_user_with_slack_identities_for_demo_token()
+ client = APIClient()
+ _ = make_data_for_demo_token(organization, user)
+
+ url = reverse("api-public:resolution_notes-detail", kwargs={"pk": public_api_constants.DEMO_RESOLUTION_NOTE_ID})
+
+ response = client.delete(url, format="json", HTTP_AUTHORIZATION=f"{token}")
+
+ assert response.status_code == status.HTTP_204_NO_CONTENT
+ assert ResolutionNote.objects.filter(public_primary_key=public_api_constants.DEMO_RESOLUTION_NOTE_ID).exists()
diff --git a/engine/apps/public_api/tests/test_demo_token/test_routes.py b/engine/apps/public_api/tests/test_demo_token/test_routes.py
new file mode 100644
index 0000000000..cd8938dbf8
--- /dev/null
+++ b/engine/apps/public_api/tests/test_demo_token/test_routes.py
@@ -0,0 +1,182 @@
+import pytest
+from django.urls import reverse
+from rest_framework import status
+from rest_framework.test import APIClient
+
+from apps.alerts.models import ChannelFilter
+from apps.public_api import constants as public_api_constants
+
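+# Demo routes; create/update/delete requests with the demo token succeed but change nothing.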
+# https://api-docs.amixr.io/#get-route
+demo_route_payload = {
+ "id": public_api_constants.DEMO_ROUTE_ID_1,
+ "escalation_chain_id": None,
+ "integration_id": public_api_constants.DEMO_INTEGRATION_ID,
+ "routing_regex": "us-(east|west)",
+ "position": 0,
+ "is_the_last_route": False,
+ "slack": {"channel_id": public_api_constants.DEMO_SLACK_CHANNEL_FOR_ROUTE_ID},
+}
+
+# https://api-docs.amixr.io/#list-routes
+demo_routes_payload = {
+ "count": 2,
+ "next": None,
+ "previous": None,
+ "results": [
+ {
+ "id": public_api_constants.DEMO_ROUTE_ID_1,
+ "escalation_chain_id": None,
+ "integration_id": public_api_constants.DEMO_INTEGRATION_ID,
+ "routing_regex": "us-(east|west)",
+ "position": 0,
+ "is_the_last_route": False,
+ "slack": {"channel_id": public_api_constants.DEMO_SLACK_CHANNEL_FOR_ROUTE_ID},
+ },
+ {
+ "id": public_api_constants.DEMO_ROUTE_ID_2,
+ "escalation_chain_id": None,
+ "integration_id": public_api_constants.DEMO_INTEGRATION_ID,
+ "routing_regex": ".*",
+ "position": 1,
+ "is_the_last_route": True,
+ "slack": {"channel_id": public_api_constants.DEMO_SLACK_CHANNEL_FOR_ROUTE_ID},
+ },
+ ],
+}
+
+
+@pytest.mark.django_db
+def test_get_route(
+ make_organization_and_user_with_slack_identities_for_demo_token,
+ make_data_for_demo_token,
+):
+ organization, user, token = make_organization_and_user_with_slack_identities_for_demo_token()
+ client = APIClient()
+ _ = make_data_for_demo_token(organization, user)
+
+ channel_filter = ChannelFilter.objects.get(public_primary_key=public_api_constants.DEMO_ROUTE_ID_1)
+
+ url = reverse("api-public:routes-detail", kwargs={"pk": channel_filter.public_primary_key})
+ response = client.get(url, format="json", HTTP_AUTHORIZATION=token)
+
+ assert response.status_code == status.HTTP_200_OK
+ assert response.json() == demo_route_payload
+
+
+@pytest.mark.django_db
+def test_get_routes_list(
+ make_organization_and_user_with_slack_identities_for_demo_token,
+ make_data_for_demo_token,
+):
+ organization, user, token = make_organization_and_user_with_slack_identities_for_demo_token()
+ client = APIClient()
+ _ = make_data_for_demo_token(organization, user)
+
+ url = reverse("api-public:routes-list")
+ response = client.get(url, format="json", HTTP_AUTHORIZATION=token)
+
+ assert response.status_code == status.HTTP_200_OK
+ assert response.json() == demo_routes_payload
+
+
+@pytest.mark.django_db
+def test_get_routes_filter_by_integration_id(
+ make_organization_and_user_with_slack_identities_for_demo_token,
+ make_data_for_demo_token,
+):
+ organization, user, token = make_organization_and_user_with_slack_identities_for_demo_token()
+ client = APIClient()
+ _ = make_data_for_demo_token(organization, user)
+
+ url = reverse("api-public:routes-list")
+ response = client.get(
+ url + f"?integration_id={public_api_constants.DEMO_INTEGRATION_ID}", format="json", HTTP_AUTHORIZATION=token
+ )
+
+ assert response.status_code == status.HTTP_200_OK
+ assert response.json() == demo_routes_payload
+
+
+@pytest.mark.django_db
+def test_create_route(
+ make_organization_and_user_with_slack_identities_for_demo_token,
+ make_data_for_demo_token,
+):
+ organization, user, token = make_organization_and_user_with_slack_identities_for_demo_token()
+ client = APIClient()
+ _ = make_data_for_demo_token(organization, user)
+
+ url = reverse("api-public:routes-list")
+ data_for_create = {
+ "integration_id": public_api_constants.DEMO_INTEGRATION_ID,
+ "routing_regex": "testreg",
+ }
+ response = client.post(url, format="json", HTTP_AUTHORIZATION=token, data=data_for_create)
+
+ assert response.status_code == status.HTTP_201_CREATED
+ assert response.json() == demo_route_payload
+
+
+@pytest.mark.django_db
+def test_invalid_route_data(
+ make_organization_and_user_with_slack_identities_for_demo_token,
+ make_data_for_demo_token,
+):
+ organization, user, token = make_organization_and_user_with_slack_identities_for_demo_token()
+ client = APIClient()
+ _ = make_data_for_demo_token(organization, user)
+
+ url = reverse("api-public:routes-list")
+ data_for_create = {
+ "integration_id": public_api_constants.DEMO_INTEGRATION_ID,
+ "routing_regex": None, # routing_regex cannot be null for non-default filters
+ }
+ response = client.post(url, format="json", HTTP_AUTHORIZATION=token, data=data_for_create)
+
+ assert response.status_code == status.HTTP_201_CREATED
+ assert response.json() == demo_route_payload
+
+
+@pytest.mark.django_db
+def test_update_route(
+ make_organization_and_user_with_slack_identities_for_demo_token,
+ make_data_for_demo_token,
+):
+ organization, user, token = make_organization_and_user_with_slack_identities_for_demo_token()
+ client = APIClient()
+ _ = make_data_for_demo_token(organization, user)
+ channel_filter = ChannelFilter.objects.get(public_primary_key=public_api_constants.DEMO_ROUTE_ID_1)
+
+ url = reverse("api-public:routes-detail", kwargs={"pk": channel_filter.public_primary_key})
+ data_to_update = {
+ "routing_regex": "testreg_updated",
+ }
+
+ assert channel_filter.filtering_term != data_to_update["routing_regex"]
+
+ response = client.put(url, format="json", HTTP_AUTHORIZATION=token, data=data_to_update)
+
+ assert response.status_code == status.HTTP_200_OK
+ # check that nothing changed
+ channel_filter.refresh_from_db()
+ assert response.json() == demo_route_payload
+ assert channel_filter.filtering_term != data_to_update["routing_regex"]
+
+
+@pytest.mark.django_db
+def test_delete_route(
+ make_organization_and_user_with_slack_identities_for_demo_token,
+ make_data_for_demo_token,
+):
+ organization, user, token = make_organization_and_user_with_slack_identities_for_demo_token()
+ client = APIClient()
+ _ = make_data_for_demo_token(organization, user)
+ channel_filter = ChannelFilter.objects.get(public_primary_key=public_api_constants.DEMO_ROUTE_ID_1)
+
+ url = reverse("api-public:routes-detail", kwargs={"pk": channel_filter.public_primary_key})
+ response = client.delete(url, format="json", HTTP_AUTHORIZATION=token)
+
+ assert response.status_code == status.HTTP_204_NO_CONTENT
+ # check that nothing changed
+ channel_filter.refresh_from_db()
+ assert channel_filter is not None
diff --git a/engine/apps/public_api/tests/test_demo_token/test_schedules.py b/engine/apps/public_api/tests/test_demo_token/test_schedules.py
new file mode 100644
index 0000000000..9a56955bf1
--- /dev/null
+++ b/engine/apps/public_api/tests/test_demo_token/test_schedules.py
@@ -0,0 +1,164 @@
+import pytest
+from django.urls import reverse
+from rest_framework import status
+from rest_framework.test import APIClient
+
+from apps.public_api import constants as public_api_constants
+from apps.schedules.models import OnCallSchedule
+
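+# Expected demo schedule payloads, returned unchanged by demo-token write requests.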
+demo_ical_schedule_payload = {
+ "id": public_api_constants.DEMO_SCHEDULE_ID_ICAL,
+ "team_id": None,
+ "name": public_api_constants.DEMO_SCHEDULE_NAME_ICAL,
+ "type": "ical",
+ "ical_url_primary": public_api_constants.DEMO_SCHEDULE_ICAL_URL_PRIMARY,
+ "ical_url_overrides": public_api_constants.DEMO_SCHEDULE_ICAL_URL_OVERRIDES,
+ "on_call_now": [public_api_constants.DEMO_USER_ID],
+ "slack": {
+ "channel_id": public_api_constants.DEMO_SLACK_CHANNEL_SLACK_ID,
+ "user_group_id": public_api_constants.DEMO_SLACK_USER_GROUP_SLACK_ID,
+ },
+}
+
+demo_calendar_schedule_payload = {
+ "id": public_api_constants.DEMO_SCHEDULE_ID_CALENDAR,
+ "team_id": None,
+ "name": public_api_constants.DEMO_SCHEDULE_NAME_CALENDAR,
+ "type": "calendar",
+ "time_zone": "America/New_york",
+ "on_call_now": [public_api_constants.DEMO_USER_ID],
+ "shifts": [
+ public_api_constants.DEMO_ON_CALL_SHIFT_ID_1,
+ public_api_constants.DEMO_ON_CALL_SHIFT_ID_2,
+ ],
+ "slack": {
+ "channel_id": public_api_constants.DEMO_SLACK_CHANNEL_SLACK_ID,
+ "user_group_id": public_api_constants.DEMO_SLACK_USER_GROUP_SLACK_ID,
+ },
+ "ical_url_overrides": None,
+}
+
+demo_schedules_payload = {
+ "count": 2,
+ "next": None,
+ "previous": None,
+ "results": [
+ demo_ical_schedule_payload,
+ demo_calendar_schedule_payload,
+ ],
+}
+
+
+@pytest.mark.django_db
+def test_get_schedule(
+ make_organization_and_user_with_slack_identities_for_demo_token,
+ make_data_for_demo_token,
+):
+ organization, user, token = make_organization_and_user_with_slack_identities_for_demo_token()
+ client = APIClient()
+ _ = make_data_for_demo_token(organization, user)
+
+ schedule = OnCallSchedule.objects.get(public_primary_key=public_api_constants.DEMO_SCHEDULE_ID_ICAL)
+
+ url = reverse("api-public:schedules-detail", kwargs={"pk": schedule.public_primary_key})
+
+ response = client.get(url, format="json", HTTP_AUTHORIZATION=f"{token}")
+
+ assert response.status_code == status.HTTP_200_OK
+ assert response.data == demo_ical_schedule_payload
+
+
+@pytest.mark.django_db
+def test_create_schedule(
+ make_organization_and_user_with_slack_identities_for_demo_token,
+ make_data_for_demo_token,
+):
+ organization, user, token = make_organization_and_user_with_slack_identities_for_demo_token()
+ client = APIClient()
+ _ = make_data_for_demo_token(organization, user)
+
+ url = reverse("api-public:schedules-list")
+
+ data = {
+ "name": "schedule test name",
+ "type": "ical",
+ }
+
+ response = client.post(url, data=data, format="json", HTTP_AUTHORIZATION=token)
+
+ assert response.status_code == status.HTTP_201_CREATED
+ # check that the demo instance was returned
+ assert response.data == demo_ical_schedule_payload
+
+
+@pytest.mark.django_db
+def test_update_ical_schedule(
+ make_organization_and_user_with_slack_identities_for_demo_token,
+ make_data_for_demo_token,
+):
+ organization, user, token = make_organization_and_user_with_slack_identities_for_demo_token()
+ client = APIClient()
+ _ = make_data_for_demo_token(organization, user)
+
+ schedule = OnCallSchedule.objects.get(public_primary_key=public_api_constants.DEMO_SCHEDULE_ID_ICAL)
+
+ url = reverse("api-public:schedules-detail", kwargs={"pk": schedule.public_primary_key})
+
+ data = {
+ "name": "NEW NAME",
+ }
+
+ response = client.put(url, data=data, format="json", HTTP_AUTHORIZATION=token)
+
+ assert response.status_code == status.HTTP_200_OK
+ # check that nothing changed
+ schedule.refresh_from_db()
+ assert schedule.name != data["name"]
+ assert response.data == demo_ical_schedule_payload
+
+
+@pytest.mark.django_db
+def test_update_calendar_schedule(
+ make_organization_and_user_with_slack_identities_for_demo_token,
+ make_data_for_demo_token,
+):
+ organization, user, token = make_organization_and_user_with_slack_identities_for_demo_token()
+ client = APIClient()
+ _ = make_data_for_demo_token(organization, user)
+
+ schedule = OnCallSchedule.objects.get(public_primary_key=public_api_constants.DEMO_SCHEDULE_ID_CALENDAR)
+
+ url = reverse("api-public:schedules-detail", kwargs={"pk": schedule.public_primary_key})
+
+ data = {
+ "name": "NEW NAME",
+ }
+
+ response = client.put(url, data=data, format="json", HTTP_AUTHORIZATION=token)
+
+ assert response.status_code == status.HTTP_200_OK
+ # check that nothing changed
+ schedule.refresh_from_db()
+ assert schedule.name != data["name"]
+ assert response.data == demo_calendar_schedule_payload
+
+
+@pytest.mark.django_db
+def test_delete_schedule(
+ make_organization_and_user_with_slack_identities_for_demo_token,
+ make_data_for_demo_token,
+):
+ organization, user, token = make_organization_and_user_with_slack_identities_for_demo_token()
+ client = APIClient()
+ _ = make_data_for_demo_token(organization, user)
+
+ schedule = OnCallSchedule.objects.get(public_primary_key=public_api_constants.DEMO_SCHEDULE_ID_ICAL)
+
+ url = reverse("api-public:schedules-detail", kwargs={"pk": schedule.public_primary_key})
+
+ response = client.delete(url, format="json", HTTP_AUTHORIZATION=f"{token}")
+
+ assert response.status_code == status.HTTP_204_NO_CONTENT
+ # check that nothing changed
+ schedule.refresh_from_db()
+ assert schedule is not None
diff --git a/engine/apps/public_api/tests/test_demo_token/test_slack_channels.py b/engine/apps/public_api/tests/test_demo_token/test_slack_channels.py
new file mode 100644
index 0000000000..80a11bdcab
--- /dev/null
+++ b/engine/apps/public_api/tests/test_demo_token/test_slack_channels.py
@@ -0,0 +1,34 @@
+import pytest
+from django.urls import reverse
+from rest_framework import status
+from rest_framework.test import APIClient
+
+from apps.public_api import constants as public_api_constants
+
+demo_slack_channels_payload = {
+ "count": 1,
+ "next": None,
+ "previous": None,
+ "results": [
+ {
+ "name": public_api_constants.DEMO_SLACK_CHANNEL_NAME,
+ "slack_id": public_api_constants.DEMO_SLACK_CHANNEL_SLACK_ID,
+ }
+ ],
+}
+
+
+@pytest.mark.django_db
+def test_get_slack_channels_list(
+ make_organization_and_user_with_slack_identities_for_demo_token,
+ make_data_for_demo_token,
+):
+ organization, user, token = make_organization_and_user_with_slack_identities_for_demo_token()
+ client = APIClient()
+ _ = make_data_for_demo_token(organization, user)
+
+ url = reverse("api-public:slack_channels-list")
+ response = client.get(url, format="json", HTTP_AUTHORIZATION=token)
+
+ assert response.status_code == status.HTTP_200_OK
+ assert response.json() == demo_slack_channels_payload
diff --git a/engine/apps/public_api/tests/test_demo_token/test_user_groups.py b/engine/apps/public_api/tests/test_demo_token/test_user_groups.py
new file mode 100644
index 0000000000..08ee995cb5
--- /dev/null
+++ b/engine/apps/public_api/tests/test_demo_token/test_user_groups.py
@@ -0,0 +1,36 @@
+import pytest
+from django.urls import reverse
+from rest_framework import status
+from rest_framework.test import APIClient
+
+from apps.public_api import constants as public_api_constants
+
+demo_user_group_payload = {
+ "id": public_api_constants.DEMO_SLACK_USER_GROUP_ID,
+ "type": "slack_based",
+ "slack": {
+ "id": public_api_constants.DEMO_SLACK_USER_GROUP_SLACK_ID,
+ "name": public_api_constants.DEMO_SLACK_USER_GROUP_NAME,
+ "handle": public_api_constants.DEMO_SLACK_USER_GROUP_HANDLE,
+ },
+}
+
+demo_user_group_payload_list = {"count": 1, "next": None, "previous": None, "results": [demo_user_group_payload]}
+
+
+@pytest.mark.django_db
+def test_demo_get_user_groups_list(
+ make_organization_and_user_with_slack_identities_for_demo_token,
+ make_data_for_demo_token,
+):
+
+ organization, user, token = make_organization_and_user_with_slack_identities_for_demo_token()
+ client = APIClient()
+ _ = make_data_for_demo_token(organization, user)
+
+ url = reverse("api-public:user_groups-list")
+
+ response = client.get(url, format="json", HTTP_AUTHORIZATION=f"{token}")
+
+ assert response.status_code == status.HTTP_200_OK
+ assert response.data == demo_user_group_payload_list
diff --git a/engine/apps/public_api/tests/test_demo_token/test_users.py b/engine/apps/public_api/tests/test_demo_token/test_users.py
new file mode 100644
index 0000000000..ffa4bfdb83
--- /dev/null
+++ b/engine/apps/public_api/tests/test_demo_token/test_users.py
@@ -0,0 +1,91 @@
+import pytest
+from django.urls import reverse
+from rest_framework import status
+from rest_framework.test import APIClient
+
+from apps.public_api import constants as public_api_constants
+
+# NB: compare with https://api-docs.amixr.io/#get-user
+
+demo_token_user_payload = {
+ "id": public_api_constants.DEMO_USER_ID,
+ "email": public_api_constants.DEMO_USER_EMAIL,
+ "slack": {"user_id": public_api_constants.DEMO_SLACK_USER_ID, "team_id": public_api_constants.DEMO_SLACK_TEAM_ID},
+ "username": public_api_constants.DEMO_USER_USERNAME,
+ "role": "admin",
+ "is_phone_number_verified": False,
+}
+
+# https://api-docs.amixr.io/#list-users
+demo_token_users_payload = {
+ "count": 1,
+ "next": None,
+ "previous": None,
+ "results": [
+ {
+ "id": public_api_constants.DEMO_USER_ID,
+ "email": public_api_constants.DEMO_USER_EMAIL,
+ "slack": {
+ "user_id": public_api_constants.DEMO_SLACK_USER_ID,
+ "team_id": public_api_constants.DEMO_SLACK_TEAM_ID,
+ },
+ "username": public_api_constants.DEMO_USER_USERNAME,
+ "role": "admin",
+ "is_phone_number_verified": False,
+ }
+ ],
+}
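+# For orientation, the payloads above correspond to what a raw request would
+# return; roughly (host and path are illustrative only, not taken from this test):
+#   curl -H "Authorization: <token>" https://<oncall-host>/api/v1/users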
+
+
+@pytest.mark.django_db
+def test_get_user(
+ make_organization_and_user_with_slack_identities_for_demo_token,
+):
+ organization, user, token = make_organization_and_user_with_slack_identities_for_demo_token()
+
+ client = APIClient()
+
+ url = reverse("api-public:users-detail", args=[user.public_primary_key])
+ response = client.get(url, format="json", HTTP_AUTHORIZATION=token)
+
+ assert response.status_code == status.HTTP_200_OK
+ assert response.json() == demo_token_user_payload
+
+ # get current user
+ url = reverse("api-public:users-detail", args=["current"])
+ response = client.get(url, format="json", HTTP_AUTHORIZATION=token)
+
+ assert response.status_code == status.HTTP_200_OK
+ assert response.json() == demo_token_user_payload
+
+
+@pytest.mark.django_db
+def test_get_users(
+ make_organization_and_user_with_slack_identities_for_demo_token,
+):
+ organization, user, token = make_organization_and_user_with_slack_identities_for_demo_token()
+
+ client = APIClient()
+
+ url = reverse("api-public:users-list")
+ response = client.get(url, format="json", HTTP_AUTHORIZATION=token)
+
+ assert response.status_code == status.HTTP_200_OK
+ assert response.json() == demo_token_users_payload
+
+
+@pytest.mark.django_db
+def test_forbidden_access(
+ make_organization_and_user_with_slack_identities_for_demo_token,
+ make_organization_and_user_with_token,
+):
+ _, user, _ = make_organization_and_user_with_slack_identities_for_demo_token()
+ _, _, another_org_token = make_organization_and_user_with_token()
+
+ client = APIClient()
+
+ url = reverse("api-public:users-detail", args=[user.public_primary_key])
+
+ response = client.get(url, format="json", HTTP_AUTHORIZATION=another_org_token)
+
+ assert response.status_code == status.HTTP_404_NOT_FOUND
diff --git a/engine/apps/public_api/tests/test_escalation_chain.py b/engine/apps/public_api/tests/test_escalation_chain.py
new file mode 100644
index 0000000000..99f6778ec8
--- /dev/null
+++ b/engine/apps/public_api/tests/test_escalation_chain.py
@@ -0,0 +1,75 @@
+import pytest
+from django.urls import reverse
+from rest_framework import status
+from rest_framework.test import APIClient
+
+
+@pytest.mark.django_db
+def test_get_escalation_chains(make_organization_and_user_with_token):
+ organization, user, token = make_organization_and_user_with_token()
+ escalation_chain = organization.escalation_chains.create(name="test")
+
+ client = APIClient()
+
+ url = reverse("api-public:escalation_chains-list")
+ response = client.get(url, format="json", HTTP_AUTHORIZATION=token)
+
+ expected_data = {
+ "count": 1,
+ "next": None,
+ "previous": None,
+ "results": [
+ {
+ "id": escalation_chain.public_primary_key,
+ "team_id": None,
+ "name": "test",
+ }
+ ],
+ }
+
+ assert response.status_code == status.HTTP_200_OK
+ assert response.data == expected_data
+
+
+@pytest.mark.django_db
+def test_create_escalation_chain(make_organization_and_user_with_token):
+ organization, user, token = make_organization_and_user_with_token()
+
+ data = {"name": "test", "team_id": None}
+
+ client = APIClient()
+ url = reverse("api-public:escalation_chains-list")
+ response = client.post(url, data=data, format="json", HTTP_AUTHORIZATION=token)
+
+ escalation_chain = organization.escalation_chains.get(name="test")
+ expected_data = {
+ "id": escalation_chain.public_primary_key,
+ "team_id": None,
+ "name": "test",
+ }
+ assert response.status_code == status.HTTP_201_CREATED
+ assert response.data == expected_data
+
+
+@pytest.mark.django_db
+def test_change_name(make_organization_and_user_with_token):
+ organization, user, token = make_organization_and_user_with_token()
+ escalation_chain = organization.escalation_chains.create(name="test")
+
+ data = {
+ "id": escalation_chain.public_primary_key,
+ "name": "changed",
+ }
+
+ client = APIClient()
+ url = reverse("api-public:escalation_chains-detail", kwargs={"pk": escalation_chain.public_primary_key})
+ response = client.put(url, data=data, format="json", HTTP_AUTHORIZATION=token)
+
+ expected_data = {
+ "id": escalation_chain.public_primary_key,
+ "team_id": None,
+ "name": "changed",
+ }
+
+ assert response.status_code == status.HTTP_200_OK
+ assert response.data == expected_data
diff --git a/engine/apps/public_api/tests/test_escalation_policies.py b/engine/apps/public_api/tests/test_escalation_policies.py
new file mode 100644
index 0000000000..06aa03452e
--- /dev/null
+++ b/engine/apps/public_api/tests/test_escalation_policies.py
@@ -0,0 +1,221 @@
+import pytest
+from django.urls import reverse
+from django.utils import timezone
+from rest_framework import status
+from rest_framework.test import APIClient
+
+from apps.alerts.models import EscalationPolicy
+from apps.public_api.serializers import EscalationPolicySerializer
+
+
+@pytest.fixture
+def escalation_policies_setup():
+ def _escalation_policies_setup(organization, user):
+ escalation_chain = organization.escalation_chains.create(name="test_chain")
+
+ escalation_policy_notify_persons = escalation_chain.escalation_policies.create(
+ step=EscalationPolicy.STEP_NOTIFY_MULTIPLE_USERS
+ )
+ escalation_policy_notify_persons.notify_to_users_queue.add(user)
+
+ escalation_policy_wait = escalation_chain.escalation_policies.create(
+ step=EscalationPolicy.STEP_WAIT,
+ wait_delay=EscalationPolicy.FIVE_MINUTES,
+ )
+
+ escalation_policy_notify_persons_empty = escalation_chain.escalation_policies.create(
+ step=EscalationPolicy.STEP_NOTIFY_MULTIPLE_USERS,
+ )
+
+ escalation_policy_notify_persons_payload = {
+ "id": escalation_policy_notify_persons.public_primary_key,
+ "escalation_chain_id": escalation_policy_notify_persons.escalation_chain.public_primary_key,
+ "position": escalation_policy_notify_persons.order,
+ "type": "notify_persons",
+ "important": False,
+ "persons_to_notify": [user.public_primary_key],
+ }
+
+ escalation_policy_wait_payload = {
+ "id": escalation_policy_wait.public_primary_key,
+ "escalation_chain_id": escalation_policy_wait.escalation_chain.public_primary_key,
+ "position": escalation_policy_wait.order,
+ "type": "wait",
+ "duration": timezone.timedelta(seconds=300).seconds,
+ }
+
+ escalation_policy_notify_persons_empty_payload = {
+ "id": escalation_policy_notify_persons_empty.public_primary_key,
+ "escalation_chain_id": escalation_policy_notify_persons_empty.escalation_chain.public_primary_key,
+ "position": escalation_policy_notify_persons_empty.order,
+ "type": "notify_persons",
+ "important": False,
+ "persons_to_notify": [],
+ }
+
+ escalation_policies_payload = {
+ "count": 3,
+ "next": None,
+ "previous": None,
+ "results": [
+ escalation_policy_notify_persons_payload,
+ escalation_policy_wait_payload,
+ escalation_policy_notify_persons_empty_payload,
+ ],
+ }
+ return (
+ escalation_chain,
+ (escalation_policy_notify_persons, escalation_policy_wait, escalation_policy_notify_persons_empty),
+ escalation_policies_payload,
+ )
+
+ return _escalation_policies_setup
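+# The fixture above is a factory: tests call it with (organization, user) and
+# unpack the escalation chain, the three created policies, and the expected list
+# payload.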
+
+
+@pytest.mark.django_db
+def test_get_escalation_policies(
+ make_organization_and_user_with_token,
+ escalation_policies_setup,
+):
+ organization, user, token = make_organization_and_user_with_token()
+ _, _, escalation_policies_payload = escalation_policies_setup(organization, user)
+
+ client = APIClient()
+
+ url = reverse("api-public:escalation_policies-list")
+ response = client.get(url, format="json", HTTP_AUTHORIZATION=token)
+
+ escalation_policies = EscalationPolicy.objects.all()
+ serializer = EscalationPolicySerializer(escalation_policies, many=True)
+
+ assert response.status_code == status.HTTP_200_OK
+ assert response.data == escalation_policies_payload
+ assert response.data["results"] == serializer.data
+
+
+@pytest.mark.django_db
+def test_get_escalation_policies_filter_by_escalation_chain(
+ make_organization_and_user_with_token,
+ escalation_policies_setup,
+):
+ organization, user, token = make_organization_and_user_with_token()
+ escalation_chain, _, escalation_policies_payload = escalation_policies_setup(organization, user)
+
+ client = APIClient()
+
+ url = reverse("api-public:escalation_policies-list")
+ response = client.get(
+ url + f"?escalation_chain_id={escalation_chain.public_primary_key}", format="json", HTTP_AUTHORIZATION=token
+ )
+
+ escalation_policies = EscalationPolicy.objects.filter(
+ escalation_chain__public_primary_key=escalation_chain.public_primary_key
+ )
+
+ serializer = EscalationPolicySerializer(escalation_policies, many=True)
+
+ assert response.status_code == status.HTTP_200_OK
+ assert response.data == escalation_policies_payload
+ assert response.data["results"] == serializer.data
+
+
+@pytest.mark.django_db
+def test_create_escalation_policy(
+ make_organization_and_user_with_token,
+ escalation_policies_setup,
+):
+ organization, user, token = make_organization_and_user_with_token()
+ escalation_chain, _, _ = escalation_policies_setup(organization, user)
+
+ data_for_create = {
+ "escalation_chain_id": escalation_chain.public_primary_key,
+ "type": "notify_person_next_each_time",
+ "position": 0,
+ "persons_to_notify_next_each_time": [user.public_primary_key],
+ }
+
+ client = APIClient()
+ url = reverse("api-public:escalation_policies-list")
+ response = client.post(url, data=data_for_create, format="json", HTTP_AUTHORIZATION=token)
+
+ escalation_policy = EscalationPolicy.objects.get(public_primary_key=response.data["id"])
+ serializer = EscalationPolicySerializer(escalation_policy)
+
+ assert response.status_code == status.HTTP_201_CREATED
+ assert response.data == serializer.data
+
+
+@pytest.mark.django_db
+def test_invalid_step_type(
+ make_organization_and_user_with_token,
+ escalation_policies_setup,
+):
+ organization, user, token = make_organization_and_user_with_token()
+ escalation_chain, _, _ = escalation_policies_setup(organization, user)
+
+ data_for_create = {
+ "route_id": escalation_chain.public_primary_key,
+ "type": "this_is_invalid_step_type", # invalid step type
+ "position": 0,
+ "persons_to_notify_next_each_time": [user.public_primary_key],
+ }
+
+ client = APIClient()
+ url = reverse("api-public:escalation_policies-list")
+ response = client.post(url, data=data_for_create, format="json", HTTP_AUTHORIZATION=token)
+
+ assert response.status_code == status.HTTP_400_BAD_REQUEST
+
+
+@pytest.mark.django_db
+def test_change_step_importance(
+ make_organization_and_user_with_token,
+ escalation_policies_setup,
+):
+ organization, user, token = make_organization_and_user_with_token()
+ _, escalation_policies, _ = escalation_policies_setup(organization, user)
+ escalation_policy_notify_persons = escalation_policies[0]
+
+ client = APIClient()
+ url = reverse(
+ "api-public:escalation_policies-detail", kwargs={"pk": escalation_policy_notify_persons.public_primary_key}
+ )
+ response = client.get(url, format="json", HTTP_AUTHORIZATION=token)
+
+ step_type = escalation_policy_notify_persons.step
+ assert step_type not in EscalationPolicy.IMPORTANT_STEPS_SET
+ assert response.data["important"] is False
+
+ data_to_change = {"important": True}
+ response = client.put(url, data=data_to_change, format="json", HTTP_AUTHORIZATION=token)
+
+ assert response.status_code == status.HTTP_200_OK
+ assert response.data["important"] is True
+ escalation_policy_notify_persons.refresh_from_db()
+
+ assert escalation_policy_notify_persons.step == EscalationPolicy.DEFAULT_TO_IMPORTANT_STEP_MAPPING[step_type]
+
+
+@pytest.mark.django_db
+def test_create_important_step(
+ make_organization_and_user_with_token,
+ escalation_policies_setup,
+):
+ organization, user, token = make_organization_and_user_with_token()
+ escalation_chain, _, _ = escalation_policies_setup(organization, user)
+
+ data_for_create = {
+ "escalation_chain_id": escalation_chain.public_primary_key,
+ "type": "notify_on_call_from_schedule",
+ "important": True,
+ }
+
+ client = APIClient()
+ url = reverse("api-public:escalation_policies-list")
+ response = client.post(url, data=data_for_create, format="json", HTTP_AUTHORIZATION=token)
+
+ escalation_policy = EscalationPolicy.objects.get(public_primary_key=response.data["id"])
+
+ assert response.status_code == status.HTTP_201_CREATED
+ assert escalation_policy.step == EscalationPolicy.STEP_NOTIFY_SCHEDULE_IMPORTANT
+ assert response.data["important"] is True
diff --git a/engine/apps/public_api/tests/test_incidents.py b/engine/apps/public_api/tests/test_incidents.py
new file mode 100644
index 0000000000..d43a1fb849
--- /dev/null
+++ b/engine/apps/public_api/tests/test_incidents.py
@@ -0,0 +1,196 @@
+from unittest import mock
+
+import pytest
+from django.urls import reverse
+from rest_framework import status
+from rest_framework.test import APIClient
+
+from apps.alerts.models import AlertGroup, AlertReceiveChannel
+
+
+def construct_expected_response_from_incidents(incidents):
+ results = []
+ for incident in incidents:
+ # render datetimes the way serializers.DateTimeField represents them
+ created_at = None
+ if incident.started_at:
+ created_at = incident.started_at.isoformat()
+ created_at = created_at[:-6] + "Z"
+
+ resolved_at = None
+ if incident.resolved_at:
+ resolved_at = incident.resolved_at.isoformat()
+ resolved_at = resolved_at[:-6] + "Z"
+
+ acknowledged_at = None
+ if incident.acknowledged_at:
+ acknowledged_at = incident.acknowledged_at.isoformat()
+ acknowledged_at = acknowledged_at[:-6] + "Z"
+
+ results.append(
+ {
+ "id": incident.public_primary_key,
+ "integration_id": incident.channel.public_primary_key,
+ "route_id": incident.channel_filter.public_primary_key,
+ "alerts_count": incident.alerts_count,
+ "state": incident.state,
+ "created_at": created_at,
+ "resolved_at": resolved_at,
+ "acknowledged_at": acknowledged_at,
+ "title": None,
+ }
+ )
+ expected_response = {"count": incidents.count(), "next": None, "previous": None, "results": results}
+ return expected_response
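+# A note on the slicing above: isoformat() on an aware UTC datetime ends in
+# "+00:00"; dropping those six characters and appending "Z" reproduces how DRF's
+# serializers.DateTimeField renders UTC timestamps with default settings.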
+
+
+@pytest.fixture()
+def incident_public_api_setup(
+ make_organization_and_user_with_token,
+ make_alert_receive_channel,
+ make_channel_filter,
+ make_alert_group,
+ make_alert,
+):
+ organization, user, token = make_organization_and_user_with_token()
+ grafana = make_alert_receive_channel(organization, integration=AlertReceiveChannel.INTEGRATION_GRAFANA)
+ formatted_webhook = make_alert_receive_channel(
+ organization, integration=AlertReceiveChannel.INTEGRATION_FORMATTED_WEBHOOK
+ )
+
+ grafana_default_route = make_channel_filter(grafana, is_default=True)
+ grafana_non_default_route = make_channel_filter(grafana, filtering_term="us-east")
+ formatted_webhook_default_route = make_channel_filter(formatted_webhook, is_default=True)
+
+ grafana_incident_default_route = make_alert_group(grafana, channel_filter=grafana_default_route)
+ grafana_incident_non_default_route = make_alert_group(grafana, channel_filter=grafana_non_default_route)
+ formatted_webhook_incident = make_alert_group(formatted_webhook, channel_filter=formatted_webhook_default_route)
+
+ make_alert(alert_group=grafana_incident_default_route, raw_request_data=grafana.config.example_payload)
+ make_alert(alert_group=grafana_incident_non_default_route, raw_request_data=grafana.config.example_payload)
+ make_alert(alert_group=formatted_webhook_incident, raw_request_data=grafana.config.example_payload)
+
+ integrations = grafana, formatted_webhook
+ incidents = grafana_incident_default_route, grafana_incident_non_default_route, formatted_webhook_incident
+ routes = grafana_default_route, grafana_non_default_route, formatted_webhook_default_route
+
+ return token, incidents, integrations, routes
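+# The tuples are returned in a fixed order so the filter tests below can pick
+# items by index (e.g. integrations[1] is formatted_webhook and routes[1] is the
+# non-default grafana route).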
+
+
+@pytest.mark.django_db
+def test_get_incidents(incident_public_api_setup):
+ token, _, _, _ = incident_public_api_setup
+ incidents = AlertGroup.unarchived_objects.all().order_by("-started_at")
+ client = APIClient()
+ expected_response = construct_expected_response_from_incidents(incidents)
+
+ url = reverse("api-public:alert_groups-list")
+ response = client.get(url, format="json", HTTP_AUTHORIZATION=f"{token}")
+
+ assert response.status_code == status.HTTP_200_OK
+ assert response.json() == expected_response
+
+
+@pytest.mark.django_db
+def test_get_incidents_filter_by_integration(
+ incident_public_api_setup,
+):
+ token, incidents, integrations, _ = incident_public_api_setup
+ formatted_webhook = integrations[1]
+ incidents = AlertGroup.unarchived_objects.filter(channel=formatted_webhook).order_by("-started_at")
+ expected_response = construct_expected_response_from_incidents(incidents)
+ client = APIClient()
+
+ url = reverse("api-public:alert_groups-list")
+ response = client.get(
+ url + f"?integration_id={formatted_webhook.public_primary_key}", format="json", HTTP_AUTHORIZATION=f"{token}"
+ )
+
+ assert response.status_code == status.HTTP_200_OK
+ assert response.json() == expected_response
+
+
+@pytest.mark.django_db
+def test_get_incidents_filter_by_integration_no_result(
+ incident_public_api_setup,
+):
+ token, _, _, _ = incident_public_api_setup
+ client = APIClient()
+
+ url = reverse("api-public:alert_groups-list")
+ response = client.get(url + "?integration_id=impossible_integration", format="json", HTTP_AUTHORIZATION=f"{token}")
+
+ assert response.status_code == status.HTTP_200_OK
+ assert response.json()["results"] == []
+
+
+@pytest.mark.django_db
+def test_get_incidents_filter_by_route(
+ incident_public_api_setup,
+):
+ token, incidents, integrations, routes = incident_public_api_setup
+ grafana_non_default_route = routes[1]
+ incidents = AlertGroup.unarchived_objects.filter(channel_filter=grafana_non_default_route).order_by("-started_at")
+ expected_response = construct_expected_response_from_incidents(incidents)
+ client = APIClient()
+
+ url = reverse("api-public:alert_groups-list")
+ response = client.get(
+ url + f"?route_id={grafana_non_default_route.public_primary_key}", format="json", HTTP_AUTHORIZATION=f"{token}"
+ )
+
+ assert response.status_code == status.HTTP_200_OK
+ assert response.json() == expected_response
+
+
+@pytest.mark.django_db
+def test_get_incidents_filter_by_route_no_result(
+ incident_public_api_setup,
+):
+ token, _, _, _ = incident_public_api_setup
+ client = APIClient()
+
+ url = reverse("api-public:alert_groups-list")
+ response = client.get(url + "?route_id=impossible_route_ir", format="json", HTTP_AUTHORIZATION=f"{token}")
+
+ assert response.status_code == status.HTTP_200_OK
+ assert response.json()["results"] == []
+
+
+@mock.patch("apps.alerts.tasks.delete_alert_group.apply_async", return_value=None)
+@pytest.mark.django_db
+def test_delete_incident_success_response(mocked_task, incident_public_api_setup):
+ token, incidents, _, _ = incident_public_api_setup
+ grafana_first_incident = incidents[0]
+ client = APIClient()
+
+ url = reverse("api-public:alert_groups-detail", kwargs={"pk": grafana_first_incident.public_primary_key})
+ data = {"mode": "delete"}
+ response = client.delete(url, data=data, format="json", HTTP_AUTHORIZATION=f"{token}")
+ assert response.status_code == status.HTTP_204_NO_CONTENT
+ assert mocked_task.call_count == 1
+
+
+@pytest.mark.django_db
+def test_delete_incident_invalid_request(incident_public_api_setup):
+ token, incidents, _, _ = incident_public_api_setup
+ grafana_first_incident = incidents[0]
+ client = APIClient()
+
+ url = reverse("api-public:alert_groups-detail", kwargs={"pk": grafana_first_incident.public_primary_key})
+ data = "delete"
+ response = client.delete(url, data=data, format="json", HTTP_AUTHORIZATION=f"{token}")
+ assert response.status_code == status.HTTP_400_BAD_REQUEST
+
+
+# This test is carried over from the old Django TestCase-based suite
+# TODO: uncomment with date checking in delete mode
+# def test_delete_incident_invalid_date(self):
+# not_valid_creation_date = VALID_DATE_FOR_DELETE_INCIDENT - timezone.timedelta(days=1)
+# self.grafana_second_alert_group.started_at = not_valid_creation_date
+# self.grafana_second_alert_group.save()
+#
+# url = reverse("api-public:alert_groups-detail", kwargs={'pk': self.grafana_second_alert_group.public_primary_key})
+# data = {"mode": "delete"}
+# response = self.client.delete(url, data=data, format="json", HTTP_AUTHORIZATION=f"{self.token}")
+# self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
diff --git a/engine/apps/public_api/tests/test_integrations.py b/engine/apps/public_api/tests/test_integrations.py
new file mode 100644
index 0000000000..a035241d95
--- /dev/null
+++ b/engine/apps/public_api/tests/test_integrations.py
@@ -0,0 +1,491 @@
+import pytest
+from django.urls import reverse
+from rest_framework import status
+from rest_framework.test import APIClient
+
+
+@pytest.mark.django_db
+def test_get_list_integrations(
+ make_organization_and_user_with_token,
+ make_alert_receive_channel,
+ make_channel_filter,
+ make_integration_heartbeat,
+):
+ organization, user, token = make_organization_and_user_with_token()
+ integration = make_alert_receive_channel(organization, verbal_name="grafana")
+ default_channel_filter = make_channel_filter(integration, is_default=True)
+ make_integration_heartbeat(integration)
+
+ client = APIClient()
+ expected_response = {
+ "count": 1,
+ "next": None,
+ "previous": None,
+ "results": [
+ {
+ "id": integration.public_primary_key,
+ "team_id": None,
+ "name": "grafana",
+ "link": integration.integration_url,
+ "type": "grafana",
+ "default_route": {
+ "escalation_chain_id": None,
+ "id": default_channel_filter.public_primary_key,
+ "slack": {"channel_id": None},
+ },
+ "heartbeat": {
+ "link": f"{integration.integration_url}heartbeat/",
+ },
+ "templates": {
+ "grouping_key": None,
+ "resolve_signal": None,
+ "acknowledge_signal": None,
+ "slack": {"title": None, "message": None, "image_url": None},
+ "web": {"title": None, "message": None, "image_url": None},
+ "sms": {
+ "title": None,
+ },
+ "phone_call": {
+ "title": None,
+ },
+ "email": {
+ "title": None,
+ "message": None,
+ },
+ "telegram": {
+ "title": None,
+ "message": None,
+ "image_url": None,
+ },
+ },
+ "maintenance_mode": None,
+ "maintenance_started_at": None,
+ "maintenance_end_at": None,
+ }
+ ],
+ }
+ url = reverse("api-public:integrations-list")
+ response = client.get(url, format="json", HTTP_AUTHORIZATION=f"{token}")
+
+ assert response.status_code == status.HTTP_200_OK
+ assert response.json() == expected_response
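+# The full expected_response dict is spelled out again in each update test below,
+# so every assertion pins the complete serialized integration rather than a
+# partial diff against a shared constant.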
+
+
+@pytest.mark.django_db
+def test_create_integration(
+ make_organization_and_user_with_token,
+ make_escalation_chain,
+):
+ organization, _, token = make_organization_and_user_with_token()
+ make_escalation_chain(organization)
+
+ client = APIClient()
+ data_for_create = {
+ "type": "grafana",
+ "name": "grafana_created",
+ "team_id": None,
+ }
+ url = reverse("api-public:integrations-list")
+ response = client.post(url, data=data_for_create, format="json", HTTP_AUTHORIZATION=f"{token}")
+ assert response.status_code == status.HTTP_201_CREATED
+
+
+@pytest.mark.django_db
+def test_create_integrations_with_none_templates(
+ make_organization_and_user_with_token,
+ make_escalation_chain,
+):
+ organization, _, token = make_organization_and_user_with_token()
+ make_escalation_chain(organization)
+
+ client = APIClient()
+ data_for_create = {
+ "type": "grafana",
+ "team_id": None,
+ "name": "grafana_created",
+ "templates": {
+ "grouping_key": None,
+ "resolve_signal": None,
+ "acknowledge_signal": None,
+ "slack": None,
+ "web": None,
+ "sms": None,
+ "phone_call": None,
+ "email": None,
+ "telegram": None,
+ },
+ }
+
+ url = reverse("api-public:integrations-list")
+
+ response = client.post(url, data=data_for_create, format="json", HTTP_AUTHORIZATION=f"{token}")
+ assert response.status_code == status.HTTP_201_CREATED
+
+
+@pytest.mark.django_db
+def test_create_integration_with_invalid_type(
+ make_organization_and_user_with_token,
+):
+ _, _, token = make_organization_and_user_with_token()
+
+ client = APIClient()
+ data_for_create = {
+ "type": "this_is_invalid_integration_type",
+ "name": "grafana_created",
+ "team_id": None,
+ }
+ url = reverse("api-public:integrations-list")
+ response = client.post(url, data=data_for_create, format="json", HTTP_AUTHORIZATION=f"{token}")
+ assert response.status_code == status.HTTP_400_BAD_REQUEST
+
+
+@pytest.mark.django_db
+def test_update_integration_template(
+ make_organization_and_user_with_token, make_alert_receive_channel, make_channel_filter, make_integration_heartbeat
+):
+ organization, user, token = make_organization_and_user_with_token()
+ integration = make_alert_receive_channel(organization, verbal_name="grafana")
+ default_channel_filter = make_channel_filter(integration, is_default=True)
+ make_integration_heartbeat(integration)
+
+ client = APIClient()
+ data_for_update = {"templates": {"grouping_key": "ip_addr", "slack": {"title": "Incident"}}}
+ expected_response = {
+ "id": integration.public_primary_key,
+ "team_id": None,
+ "name": "grafana",
+ "link": integration.integration_url,
+ "type": "grafana",
+ "default_route": {
+ "escalation_chain_id": None,
+ "id": default_channel_filter.public_primary_key,
+ "slack": {"channel_id": None},
+ },
+ "heartbeat": {
+ "link": f"{integration.integration_url}heartbeat/",
+ },
+ "templates": {
+ "grouping_key": "ip_addr",
+ "resolve_signal": None,
+ "acknowledge_signal": None,
+ "slack": {"title": "Incident", "message": None, "image_url": None},
+ "web": {"title": None, "message": None, "image_url": None},
+ "sms": {
+ "title": None,
+ },
+ "phone_call": {
+ "title": None,
+ },
+ "email": {
+ "title": None,
+ "message": None,
+ },
+ "telegram": {
+ "title": None,
+ "message": None,
+ "image_url": None,
+ },
+ },
+ "maintenance_mode": None,
+ "maintenance_started_at": None,
+ "maintenance_end_at": None,
+ }
+ url = reverse("api-public:integrations-detail", args=[integration.public_primary_key])
+ response = client.put(url, data=data_for_update, format="json", HTTP_AUTHORIZATION=f"{token}")
+ assert response.status_code == status.HTTP_200_OK
+ assert response.data == expected_response
+
+
+@pytest.mark.django_db
+def test_update_invalid_integration_template(
+ make_organization_and_user_with_token, make_alert_receive_channel, make_channel_filter, make_integration_heartbeat
+):
+ organization, user, token = make_organization_and_user_with_token()
+ integration = make_alert_receive_channel(organization, verbal_name="grafana")
+ make_channel_filter(integration, is_default=True)
+ make_integration_heartbeat(integration)
+
+ client = APIClient()
+ data_for_update = {"templates": {"slack": {"title": "{% invalid jinja template }}"}}}
+ url = reverse("api-public:integrations-detail", args=[integration.public_primary_key])
+ response = client.put(url, data=data_for_update, format="json", HTTP_AUTHORIZATION=f"{token}")
+ assert response.status_code == status.HTTP_400_BAD_REQUEST
+
+
+@pytest.mark.django_db
+def test_update_resolve_signal_template(
+ make_organization_and_user_with_token, make_alert_receive_channel, make_channel_filter, make_integration_heartbeat
+):
+ organization, user, token = make_organization_and_user_with_token()
+ integration = make_alert_receive_channel(organization, verbal_name="grafana")
+ default_channel_filter = make_channel_filter(integration, is_default=True)
+ make_integration_heartbeat(integration)
+
+ client = APIClient()
+ data_for_update = {"templates": {"resolve_signal": "resig"}}
+ expected_response = {
+ "id": integration.public_primary_key,
+ "team_id": None,
+ "name": "grafana",
+ "link": integration.integration_url,
+ "type": "grafana",
+ "default_route": {
+ "escalation_chain_id": None,
+ "id": default_channel_filter.public_primary_key,
+ "slack": {"channel_id": None},
+ },
+ "heartbeat": {
+ "link": f"{integration.integration_url}heartbeat/",
+ },
+ "templates": {
+ "grouping_key": None,
+ "resolve_signal": "resig",
+ "acknowledge_signal": None,
+ "slack": {"title": None, "message": None, "image_url": None},
+ "web": {"title": None, "message": None, "image_url": None},
+ "sms": {
+ "title": None,
+ },
+ "phone_call": {
+ "title": None,
+ },
+ "email": {
+ "title": None,
+ "message": None,
+ },
+ "telegram": {
+ "title": None,
+ "message": None,
+ "image_url": None,
+ },
+ },
+ "maintenance_mode": None,
+ "maintenance_started_at": None,
+ "maintenance_end_at": None,
+ }
+ url = reverse("api-public:integrations-detail", args=[integration.public_primary_key])
+ response = client.put(url, data=data_for_update, format="json", HTTP_AUTHORIZATION=f"{token}")
+ assert response.status_code == status.HTTP_200_OK
+ assert response.data == expected_response
+
+
+@pytest.mark.django_db
+def test_update_invalid_resolve_signal_template(
+ make_organization_and_user_with_token, make_alert_receive_channel, make_channel_filter, make_integration_heartbeat
+):
+ organization, user, token = make_organization_and_user_with_token()
+ integration = make_alert_receive_channel(organization, verbal_name="grafana")
+ make_channel_filter(integration, is_default=True)
+ make_integration_heartbeat(integration)
+
+ client = APIClient()
+ data_for_update = {"templates": {"resolve_signal": "{% invalid jinja template }}"}}
+ url = reverse("api-public:integrations-detail", args=[integration.public_primary_key])
+ response = client.put(url, data=data_for_update, format="json", HTTP_AUTHORIZATION=f"{token}")
+ assert response.status_code == status.HTTP_400_BAD_REQUEST
+
+
+@pytest.mark.django_db
+def test_update_empty_grouping_key_template(
+ make_organization_and_user_with_token, make_alert_receive_channel, make_channel_filter, make_integration_heartbeat
+):
+ organization, user, token = make_organization_and_user_with_token()
+ integration = make_alert_receive_channel(organization, verbal_name="grafana")
+ make_channel_filter(integration, is_default=True)
+ make_integration_heartbeat(integration)
+
+ client = APIClient()
+ data_for_update = {"templates": {"grouping_key": {}}}
+ url = reverse("api-public:integrations-detail", args=[integration.public_primary_key])
+ response = client.put(url, data=data_for_update, format="json", HTTP_AUTHORIZATION=f"{token}")
+ assert response.status_code == status.HTTP_400_BAD_REQUEST
+
+
+@pytest.mark.django_db
+def test_update_invalid_flat_web_template(
+ make_organization_and_user_with_token, make_alert_receive_channel, make_channel_filter, make_integration_heartbeat
+):
+ organization, user, token = make_organization_and_user_with_token()
+ integration = make_alert_receive_channel(organization, verbal_name="grafana")
+ make_channel_filter(integration, is_default=True)
+ make_integration_heartbeat(integration)
+
+ client = APIClient()
+ data_for_update = {"templates": {"web": "invalid_web_template"}}
+ url = reverse("api-public:integrations-detail", args=[integration.public_primary_key])
+ response = client.put(url, data=data_for_update, format="json", HTTP_AUTHORIZATION=f"{token}")
+ assert response.status_code == status.HTTP_400_BAD_REQUEST
+
+
+@pytest.mark.django_db
+def test_update_sms_template_with_empty_dict(
+ make_organization_and_user_with_token, make_alert_receive_channel, make_channel_filter, make_integration_heartbeat
+):
+ organization, user, token = make_organization_and_user_with_token()
+ integration = make_alert_receive_channel(organization, verbal_name="grafana")
+ default_channel_filter = make_channel_filter(integration, is_default=True)
+ make_integration_heartbeat(integration)
+
+ client = APIClient()
+ data_for_update = {"templates": {"sms": {}}}
+ expected_response = {
+ "id": integration.public_primary_key,
+ "team_id": None,
+ "name": "grafana",
+ "link": integration.integration_url,
+ "type": "grafana",
+ "default_route": {
+ "escalation_chain_id": None,
+ "id": default_channel_filter.public_primary_key,
+ "slack": {"channel_id": None},
+ },
+ "heartbeat": {
+ "link": f"{integration.integration_url}heartbeat/",
+ },
+ "templates": {
+ "grouping_key": None,
+ "resolve_signal": None,
+ "acknowledge_signal": None,
+ "slack": {"title": None, "message": None, "image_url": None},
+ "web": {"title": None, "message": None, "image_url": None},
+ "sms": {
+ "title": None,
+ },
+ "phone_call": {
+ "title": None,
+ },
+ "email": {
+ "title": None,
+ "message": None,
+ },
+ "telegram": {
+ "title": None,
+ "message": None,
+ "image_url": None,
+ },
+ },
+ "maintenance_mode": None,
+ "maintenance_started_at": None,
+ "maintenance_end_at": None,
+ }
+ url = reverse("api-public:integrations-detail", args=[integration.public_primary_key])
+ response = client.put(url, data=data_for_update, format="json", HTTP_AUTHORIZATION=f"{token}")
+ assert response.status_code == status.HTTP_200_OK
+ assert response.data == expected_response
+
+
+@pytest.mark.django_db
+def test_update_integration_name(
+ make_organization_and_user_with_token, make_alert_receive_channel, make_channel_filter, make_integration_heartbeat
+):
+ organization, user, token = make_organization_and_user_with_token()
+ integration = make_alert_receive_channel(organization, verbal_name="grafana")
+ default_channel_filter = make_channel_filter(integration, is_default=True)
+ make_integration_heartbeat(integration)
+
+ client = APIClient()
+ data_for_update = {"name": "grafana_updated"}
+ expected_response = {
+ "id": integration.public_primary_key,
+ "team_id": None,
+ "name": "grafana_updated",
+ "link": integration.integration_url,
+ "type": "grafana",
+ "default_route": {
+ "escalation_chain_id": None,
+ "id": default_channel_filter.public_primary_key,
+ "slack": {"channel_id": None},
+ },
+ "heartbeat": {
+ "link": f"{integration.integration_url}heartbeat/",
+ },
+ "templates": {
+ "grouping_key": None,
+ "resolve_signal": None,
+ "acknowledge_signal": None,
+ "slack": {"title": None, "message": None, "image_url": None},
+ "web": {"title": None, "message": None, "image_url": None},
+ "sms": {
+ "title": None,
+ },
+ "phone_call": {
+ "title": None,
+ },
+ "email": {
+ "title": None,
+ "message": None,
+ },
+ "telegram": {
+ "title": None,
+ "message": None,
+ "image_url": None,
+ },
+ },
+ "maintenance_mode": None,
+ "maintenance_started_at": None,
+ "maintenance_end_at": None,
+ }
+ url = reverse("api-public:integrations-detail", args=[integration.public_primary_key])
+ response = client.put(url, data=data_for_update, format="json", HTTP_AUTHORIZATION=f"{token}")
+ assert response.status_code == status.HTTP_200_OK
+ assert response.data == expected_response
+
+
+@pytest.mark.django_db
+def test_set_default_template(
+ make_organization_and_user_with_token, make_alert_receive_channel, make_channel_filter, make_integration_heartbeat
+):
+ organization, user, token = make_organization_and_user_with_token()
+ integration = make_alert_receive_channel(organization, verbal_name="grafana")
+ integration.slack_title_template = "updated_template"
+ integration.grouping_id_template = "updated_template"
+ integration.save()
+ default_channel_filter = make_channel_filter(integration, is_default=True)
+ make_integration_heartbeat(integration)
+
+ client = APIClient()
+ data_for_update = {"templates": {"grouping_key": None, "slack": {"title": None}}}
+ expected_response = {
+ "id": integration.public_primary_key,
+ "team_id": None,
+ "name": "grafana",
+ "link": integration.integration_url,
+ "type": "grafana",
+ "default_route": {
+ "escalation_chain_id": None,
+ "id": default_channel_filter.public_primary_key,
+ "slack": {"channel_id": None},
+ },
+ "heartbeat": {
+ "link": f"{integration.integration_url}heartbeat/",
+ },
+ "templates": {
+ "grouping_key": None,
+ "resolve_signal": None,
+ "acknowledge_signal": None,
+ "slack": {"title": None, "message": None, "image_url": None},
+ "web": {"title": None, "message": None, "image_url": None},
+ "sms": {
+ "title": None,
+ },
+ "phone_call": {
+ "title": None,
+ },
+ "email": {
+ "title": None,
+ "message": None,
+ },
+ "telegram": {
+ "title": None,
+ "message": None,
+ "image_url": None,
+ },
+ },
+ "maintenance_mode": None,
+ "maintenance_started_at": None,
+ "maintenance_end_at": None,
+ }
+ url = reverse("api-public:integrations-detail", args=[integration.public_primary_key])
+ response = client.put(url, data=data_for_update, format="json", HTTP_AUTHORIZATION=f"{token}")
+ assert response.status_code == status.HTTP_200_OK
+ assert response.data == expected_response
diff --git a/engine/apps/public_api/tests/test_maintenance.py b/engine/apps/public_api/tests/test_maintenance.py
new file mode 100644
index 0000000000..02e1af02b9
--- /dev/null
+++ b/engine/apps/public_api/tests/test_maintenance.py
@@ -0,0 +1,48 @@
+import dateutil.parser
+import pytest
+from django.urls import reverse
+from django.utils import timezone
+from rest_framework.test import APIClient
+
+
+@pytest.mark.django_db
+def test_start_and_stop_maintenance_for_integration(
+ make_organization_and_user_with_token, make_alert_receive_channel, make_escalation_chain
+):
+ organization, user, token = make_organization_and_user_with_token()
+ integration = make_alert_receive_channel(organization)
+ make_escalation_chain(organization)
+
+ client = APIClient()
+ url = reverse("api-public:integrations-detail", args=[integration.public_primary_key])
+ response = client.get(url, format="json", HTTP_AUTHORIZATION=f"{token}")
+
+ # Make sure no maintenance is in progress
+ assert response.json()["maintenance_mode"] is None
+ assert response.json()["maintenance_started_at"] is None
+ assert response.json()["maintenance_end_at"] is None
+
+ # Starting maintenance
+ client.post(
+ url + "/maintenance_start/",
+ data={
+ "mode": "Maintenance",
+ "duration": 100,
+ },
+ format="json",
+ HTTP_AUTHORIZATION=f"{token}",
+ )
+
+ response = client.get(url, format="json", HTTP_AUTHORIZATION=f"{token}")
+ assert response.json()["maintenance_mode"] == "maintenance"
+ assert dateutil.parser.parse(response.json()["maintenance_end_at"]) - dateutil.parser.parse(
+ response.json()["maintenance_started_at"]
+ ) == timezone.timedelta(seconds=100)
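+ # the 100-second delta checked here mirrors the "duration": 100 passed above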
+
+ # Ending maintenance
+ client.post(url + "/maintenance_stop/", format="json", HTTP_AUTHORIZATION=f"{token}")
+ response = client.get(url, format="json", HTTP_AUTHORIZATION=f"{token}")
+
+ assert response.json()["maintenance_mode"] is None
+ assert response.json()["maintenance_started_at"] is None
+ assert response.json()["maintenance_end_at"] is None
diff --git a/engine/apps/public_api/tests/test_on_call_shifts.py b/engine/apps/public_api/tests/test_on_call_shifts.py
new file mode 100644
index 0000000000..c5bb99d1ac
--- /dev/null
+++ b/engine/apps/public_api/tests/test_on_call_shifts.py
@@ -0,0 +1,239 @@
+import datetime
+
+import pytest
+from django.urls import reverse
+from rest_framework import status
+from rest_framework.test import APIClient
+
+from apps.schedules.models import CustomOnCallShift, OnCallScheduleCalendar
+
+invalid_field_data_1 = {
+ "frequency": None,
+}
+
+invalid_field_data_2 = {
+ "start": datetime.datetime.now(),
+}
+
+invalid_field_data_3 = {
+ "by_day": ["QQ", "FR"],
+}
+
+invalid_field_data_4 = {
+ "by_month": [13],
+}
+
+invalid_field_data_5 = {
+ "by_monthday": [35],
+}
+
+invalid_field_data_6 = {
+ "interval": 0,
+}
+
+invalid_field_data_7 = {
+ "type": "invalid_type",
+}
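+# Each dict above overrides exactly one field with an invalid value; they are fed
+# into test_update_on_call_shift_invalid_field below via pytest.mark.parametrize,
+# and each case is expected to return HTTP 400.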
+
+
+@pytest.mark.django_db
+def test_get_on_call_shift(make_organization_and_user_with_token, make_on_call_shift, make_schedule):
+ organization, user, token = make_organization_and_user_with_token()
+ client = APIClient()
+
+ data = {
+ "start": datetime.datetime.now().replace(microsecond=0),
+ "duration": datetime.timedelta(seconds=7200),
+ }
+ schedule = make_schedule(organization, schedule_class=OnCallScheduleCalendar)
+ on_call_shift = make_on_call_shift(
+ organization=organization, shift_type=CustomOnCallShift.TYPE_SINGLE_EVENT, **data
+ )
+ on_call_shift.users.add(user)
+ schedule.custom_on_call_shifts.add(on_call_shift)
+
+ url = reverse("api-public:on_call_shifts-detail", kwargs={"pk": on_call_shift.public_primary_key})
+
+ response = client.get(url, format="json", HTTP_AUTHORIZATION=f"{token}")
+
+ result = {
+ "id": on_call_shift.public_primary_key,
+ "team_id": None,
+ "name": on_call_shift.name,
+ "type": "single_event",
+ "time_zone": None,
+ "level": 0,
+ "start": on_call_shift.start.strftime("%Y-%m-%dT%H:%M:%S"),
+ "duration": int(on_call_shift.duration.total_seconds()),
+ "users": [user.public_primary_key],
+ }
+
+ assert response.status_code == status.HTTP_200_OK
+ assert response.data == result
+
+
+@pytest.mark.django_db
+def test_create_on_call_shift(make_organization_and_user_with_token):
+ organization, user, token = make_organization_and_user_with_token()
+ client = APIClient()
+
+ url = reverse("api-public:on_call_shifts-list")
+
+ data = {
+ "team_id": None,
+ "name": "test name",
+ "type": "recurrent_event",
+ "level": 1,
+ "start": datetime.datetime.now().strftime("%Y-%m-%dT%H:%M:%S"),
+ "duration": 10800,
+ "users": [user.public_primary_key],
+ "week_start": "MO",
+ "frequency": "weekly",
+ "interval": 2,
+ "by_day": ["MO", "WE", "FR"],
+ }
+
+ response = client.post(url, data=data, format="json", HTTP_AUTHORIZATION=f"{token}")
+ on_call_shift = CustomOnCallShift.objects.get(public_primary_key=response.data["id"])
+
+ result = {
+ "id": on_call_shift.public_primary_key,
+ "team_id": None,
+ "name": data["name"],
+ "type": "recurrent_event",
+ "time_zone": None,
+ "level": data["level"],
+ "start": data["start"],
+ "duration": data["duration"],
+ "frequency": data["frequency"],
+ "interval": data["interval"],
+ "week_start": data["week_start"],
+ "by_day": data["by_day"],
+ "users": [user.public_primary_key],
+ "by_month": None,
+ "by_monthday": None,
+ }
+
+ assert response.status_code == status.HTTP_201_CREATED
+ assert response.data == result
+
+
+@pytest.mark.django_db
+def test_update_on_call_shift(make_organization_and_user_with_token, make_on_call_shift, make_schedule):
+ organization, user, token = make_organization_and_user_with_token()
+ client = APIClient()
+
+ data = {
+ "start": datetime.datetime.now().replace(microsecond=0),
+ "duration": datetime.timedelta(seconds=7200),
+ "frequency": CustomOnCallShift.FREQUENCY_WEEKLY,
+ "interval": 2,
+ "by_day": ["MO", "FR"],
+ }
+
+ schedule = make_schedule(organization, schedule_class=OnCallScheduleCalendar)
+ on_call_shift = make_on_call_shift(
+ organization=organization, shift_type=CustomOnCallShift.TYPE_RECURRENT_EVENT, **data
+ )
+ schedule.custom_on_call_shifts.add(on_call_shift)
+
+ url = reverse("api-public:on_call_shifts-detail", kwargs={"pk": on_call_shift.public_primary_key})
+
+ data_to_update = {
+ "duration": 14400,
+ "users": [user.public_primary_key],
+ "by_day": ["MO", "WE", "FR"],
+ }
+
+ assert int(on_call_shift.duration.total_seconds()) != data_to_update["duration"]
+ assert on_call_shift.by_day != data_to_update["by_day"]
+ assert len(on_call_shift.users.filter(public_primary_key=user.public_primary_key)) == 0
+
+ response = client.put(url, data=data_to_update, format="json", HTTP_AUTHORIZATION=f"{token}")
+
+ result = {
+ "id": on_call_shift.public_primary_key,
+ "team_id": None,
+ "name": on_call_shift.name,
+ "type": "recurrent_event",
+ "time_zone": None,
+ "level": 0,
+ "start": on_call_shift.start.strftime("%Y-%m-%dT%H:%M:%S"),
+ "duration": data_to_update["duration"],
+ "frequency": "weekly",
+ "interval": on_call_shift.interval,
+ "week_start": "SU",
+ "by_day": data_to_update["by_day"],
+ "users": [user.public_primary_key],
+ "by_month": None,
+ "by_monthday": None,
+ }
+
+ assert response.status_code == status.HTTP_200_OK
+ on_call_shift.refresh_from_db()
+
+ assert int(on_call_shift.duration.total_seconds()) == data_to_update["duration"]
+ assert on_call_shift.by_day == data_to_update["by_day"]
+ assert len(on_call_shift.users.filter(public_primary_key=user.public_primary_key)) == 1
+ assert response.data == result
+
+
+@pytest.mark.django_db
+@pytest.mark.parametrize(
+ "data_to_update",
+ [
+ invalid_field_data_1,
+ invalid_field_data_2,
+ invalid_field_data_3,
+ invalid_field_data_4,
+ invalid_field_data_5,
+ invalid_field_data_6,
+ invalid_field_data_7,
+ ],
+)
+def test_update_on_call_shift_invalid_field(make_organization_and_user_with_token, make_on_call_shift, data_to_update):
+ organization, user, token = make_organization_and_user_with_token()
+ client = APIClient()
+
+ data = {
+ "start": datetime.datetime.now().replace(microsecond=0),
+ "duration": datetime.timedelta(seconds=7200),
+ "frequency": CustomOnCallShift.FREQUENCY_WEEKLY,
+ "interval": 2,
+ "by_day": ["MO", "FR"],
+ }
+
+ on_call_shift = make_on_call_shift(
+ organization=organization, shift_type=CustomOnCallShift.TYPE_RECURRENT_EVENT, **data
+ )
+
+ url = reverse("api-public:on_call_shifts-detail", kwargs={"pk": on_call_shift.public_primary_key})
+
+ response = client.put(url, data=data_to_update, format="json", HTTP_AUTHORIZATION=f"{token}")
+
+ assert response.status_code == status.HTTP_400_BAD_REQUEST
+
+
+@pytest.mark.django_db
+def test_delete_on_call_shift(make_organization_and_user_with_token, make_on_call_shift):
+ organization, user, token = make_organization_and_user_with_token()
+ client = APIClient()
+
+ data = {
+ "start": datetime.datetime.now().replace(microsecond=0),
+ "duration": datetime.timedelta(seconds=7200),
+ }
+ on_call_shift = make_on_call_shift(
+ organization=organization, shift_type=CustomOnCallShift.TYPE_SINGLE_EVENT, **data
+ )
+
+ url = reverse("api-public:on_call_shifts-detail", kwargs={"pk": on_call_shift.public_primary_key})
+
+ response = client.delete(url, format="json", HTTP_AUTHORIZATION=f"{token}")
+
+ assert response.status_code == status.HTTP_204_NO_CONTENT
+
+ with pytest.raises(CustomOnCallShift.DoesNotExist):
+ on_call_shift.refresh_from_db()
diff --git a/engine/apps/public_api/tests/test_personal_notification_rules.py b/engine/apps/public_api/tests/test_personal_notification_rules.py
new file mode 100644
index 0000000000..7409a3f2e1
--- /dev/null
+++ b/engine/apps/public_api/tests/test_personal_notification_rules.py
@@ -0,0 +1,323 @@
+import pytest
+from django.urls import reverse
+from django.utils import timezone
+from rest_framework import status
+from rest_framework.test import APIClient
+
+from apps.base.models import UserNotificationPolicy
+from apps.base.models.user_notification_policy import NotificationChannelPublicAPIOptions
+
+TYPE_WAIT = "wait"
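+# In the payloads below, "wait" is the rule type that carries a "duration" field
+# instead of a notification channel; channel-backed types are looked up through
+# NotificationChannelPublicAPIOptions.LABELS.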
+
+
+@pytest.fixture()
+def personal_notification_rule_public_api_setup(
+ make_organization_and_user_with_token,
+ make_user_notification_policy,
+):
+ organization, user, token = make_organization_and_user_with_token()
+ notification_rule_wait = make_user_notification_policy(
+ user, wait_delay=UserNotificationPolicy.FIVE_MINUTES, step=UserNotificationPolicy.Step.WAIT
+ )
+ notification_rule_phone_call = make_user_notification_policy(
+ user, notify_by=UserNotificationPolicy.NotificationChannel.PHONE_CALL, step=UserNotificationPolicy.Step.NOTIFY
+ )
+ notification_rule_important = make_user_notification_policy(
+ user,
+ notify_by=UserNotificationPolicy.NotificationChannel.PHONE_CALL,
+ step=UserNotificationPolicy.Step.NOTIFY,
+ important=True,
+ )
+ return organization, user, token, notification_rule_wait, notification_rule_phone_call, notification_rule_important
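+# Three rules are created: a 5-minute wait step, a phone-call step, and an
+# "important" phone-call step; the list and ?important= filter tests below rely
+# on this exact composition.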
+
+
+@pytest.mark.django_db
+def test_get_personal_notification_rule(personal_notification_rule_public_api_setup):
+ _, user, token, _, notification_rule_phone_call, _ = personal_notification_rule_public_api_setup
+
+ client = APIClient()
+
+ url = reverse(
+ "api-public:personal_notification_rules-detail", kwargs={"pk": notification_rule_phone_call.public_primary_key}
+ )
+ response = client.get(url, format="json", HTTP_AUTHORIZATION=token)
+
+ expected_response = {
+ "id": notification_rule_phone_call.public_primary_key,
+ "user_id": user.public_primary_key,
+ "type": NotificationChannelPublicAPIOptions.LABELS[notification_rule_phone_call.notify_by],
+ "position": notification_rule_phone_call.order,
+ "important": False,
+ }
+
+ assert response.status_code == status.HTTP_200_OK
+ assert response.json() == expected_response
+
+
+@pytest.mark.django_db
+def test_get_personal_notification_rules_list(personal_notification_rule_public_api_setup):
+ (
+ _,
+ user,
+ token,
+ notification_rule_wait,
+ notification_rule_phone_call,
+ notification_rule_important,
+ ) = personal_notification_rule_public_api_setup
+
+ client = APIClient()
+
+ url = reverse("api-public:personal_notification_rules-list")
+ response = client.get(url, format="json", HTTP_AUTHORIZATION=token)
+
+ expected_response = {
+ "count": 3,
+ "next": None,
+ "previous": None,
+ "results": [
+ {
+ "id": notification_rule_wait.public_primary_key,
+ "user_id": user.public_primary_key,
+ "type": TYPE_WAIT,
+ "duration": timezone.timedelta(seconds=300).seconds,
+ "position": notification_rule_wait.order,
+ "important": False,
+ },
+ {
+ "id": notification_rule_phone_call.public_primary_key,
+ "user_id": user.public_primary_key,
+ "type": NotificationChannelPublicAPIOptions.LABELS[notification_rule_phone_call.notify_by],
+ "position": notification_rule_phone_call.order,
+ "important": False,
+ },
+ {
+ "id": notification_rule_important.public_primary_key,
+ "user_id": user.public_primary_key,
+ "type": NotificationChannelPublicAPIOptions.LABELS[notification_rule_important.notify_by],
+ "position": notification_rule_important.order,
+ "important": True,
+ },
+ ],
+ }
+
+ assert response.status_code == status.HTTP_200_OK
+ assert response.json() == expected_response
+
+
+@pytest.mark.django_db
+def test_get_personal_notification_rules_list_important(personal_notification_rule_public_api_setup):
+ _, user, token, _, _, notification_rule_important = personal_notification_rule_public_api_setup
+
+ client = APIClient()
+
+ url = reverse("api-public:personal_notification_rules-list")
+ response = client.get(url + "?important=true", format="json", HTTP_AUTHORIZATION=token)
+
+ expected_response = {
+ "count": 1,
+ "next": None,
+ "previous": None,
+ "results": [
+ {
+ "id": notification_rule_important.public_primary_key,
+ "user_id": user.public_primary_key,
+ "type": NotificationChannelPublicAPIOptions.LABELS[notification_rule_important.notify_by],
+ "position": notification_rule_important.order,
+ "important": True,
+ }
+ ],
+ }
+
+ assert response.status_code == status.HTTP_200_OK
+ assert response.json() == expected_response
+
+
+@pytest.mark.django_db
+def test_get_personal_notification_rules_list_non_important(personal_notification_rule_public_api_setup):
+ (
+ _,
+ user,
+ token,
+ notification_rule_wait,
+ notification_rule_phone_call,
+ _,
+ ) = personal_notification_rule_public_api_setup
+
+ client = APIClient()
+
+ url = reverse("api-public:personal_notification_rules-list")
+ response = client.get(url + "?important=false", format="json", HTTP_AUTHORIZATION=token)
+
+ expected_response = {
+ "count": 2,
+ "next": None,
+ "previous": None,
+ "results": [
+ {
+ "id": notification_rule_wait.public_primary_key,
+ "user_id": user.public_primary_key,
+ "type": TYPE_WAIT,
+ "duration": timezone.timedelta(seconds=300).seconds,
+ "position": notification_rule_wait.order,
+ "important": False,
+ },
+ {
+ "id": notification_rule_phone_call.public_primary_key,
+ "user_id": user.public_primary_key,
+ "type": NotificationChannelPublicAPIOptions.LABELS[notification_rule_phone_call.notify_by],
+ "position": notification_rule_phone_call.order,
+ "important": False,
+ },
+ ],
+ }
+
+ assert response.status_code == status.HTTP_200_OK
+ assert response.json() == expected_response
+
+
+@pytest.mark.django_db
+def test_update_personal_notification_rule(personal_notification_rule_public_api_setup):
+ _, user, token, _, notification_rule_phone_call, _ = personal_notification_rule_public_api_setup
+
+ client = APIClient()
+
+ url = reverse(
+ "api-public:personal_notification_rules-detail", kwargs={"pk": notification_rule_phone_call.public_primary_key}
+ )
+
+ data_to_update = {
+ "type": NotificationChannelPublicAPIOptions.LABELS[UserNotificationPolicy.NotificationChannel.SMS]
+ }
+ assert notification_rule_phone_call.notify_by != UserNotificationPolicy.NotificationChannel.SMS
+ response = client.put(url, format="json", HTTP_AUTHORIZATION=token, data=data_to_update)
+
+ expected_response = {
+ "id": notification_rule_phone_call.public_primary_key,
+ "user_id": user.public_primary_key,
+ "type": data_to_update["type"],
+ "position": notification_rule_phone_call.order,
+ "important": False,
+ }
+
+ assert response.status_code == status.HTTP_200_OK
+ assert response.json() == expected_response
+
+
+@pytest.mark.django_db
+def test_create_personal_notification_rule_wait(personal_notification_rule_public_api_setup):
+ _, user, token, _, _, _ = personal_notification_rule_public_api_setup
+
+ client = APIClient()
+
+ url = reverse("api-public:personal_notification_rules-list")
+ data_for_create = {
+ "user_id": user.public_primary_key,
+ "type": TYPE_WAIT,
+ "position": 1,
+ "duration": timezone.timedelta(seconds=300).seconds,
+ }
+ response = client.post(url, format="json", HTTP_AUTHORIZATION=token, data=data_for_create)
+
+ expected_response = {
+ "id": response.data["id"],
+ "user_id": user.public_primary_key,
+ "type": TYPE_WAIT,
+ "duration": data_for_create["duration"],
+ "position": data_for_create["position"],
+ "important": False,
+ }
+
+ assert response.status_code == status.HTTP_201_CREATED
+ assert response.json() == expected_response
+
+ notification_rule = UserNotificationPolicy.objects.get(public_primary_key=response.data["id"])
+ assert notification_rule.step == UserNotificationPolicy.Step.WAIT
+
+
+@pytest.mark.django_db
+def test_create_personal_notification_rule_notify_by_sms(personal_notification_rule_public_api_setup):
+ _, user, token, _, _, _ = personal_notification_rule_public_api_setup
+
+ client = APIClient()
+
+ url = reverse("api-public:personal_notification_rules-list")
+ data_for_create = {
+ "user_id": user.public_primary_key,
+ "type": NotificationChannelPublicAPIOptions.LABELS[UserNotificationPolicy.NotificationChannel.SMS],
+ "position": 1,
+ }
+ response = client.post(url, format="json", HTTP_AUTHORIZATION=token, data=data_for_create)
+
+ expected_response = {
+ "id": response.data["id"],
+ "user_id": user.public_primary_key,
+ "type": NotificationChannelPublicAPIOptions.LABELS[UserNotificationPolicy.NotificationChannel.SMS],
+ "position": data_for_create["position"],
+ "important": False,
+ }
+
+ assert response.status_code == status.HTTP_201_CREATED
+ assert response.json() == expected_response
+
+ notification_rule = UserNotificationPolicy.objects.get(public_primary_key=response.data["id"])
+ assert notification_rule.step == UserNotificationPolicy.Step.NOTIFY
+ assert notification_rule.notify_by == UserNotificationPolicy.NotificationChannel.SMS
+
+
+@pytest.mark.django_db
+def test_create_personal_notification_rule_notify_by_sms_important(personal_notification_rule_public_api_setup):
+ _, user, token, _, _, _ = personal_notification_rule_public_api_setup
+
+ client = APIClient()
+
+ url = reverse("api-public:personal_notification_rules-list")
+ data_for_create = {
+ "user_id": user.public_primary_key,
+ "type": NotificationChannelPublicAPIOptions.LABELS[UserNotificationPolicy.NotificationChannel.SMS],
+ "position": 1,
+ "important": True,
+ }
+ response = client.post(url, format="json", HTTP_AUTHORIZATION=token, data=data_for_create)
+
+ expected_response = {
+ "id": response.data["id"],
+ "user_id": user.public_primary_key,
+ "type": NotificationChannelPublicAPIOptions.LABELS[UserNotificationPolicy.NotificationChannel.SMS],
+ "position": data_for_create["position"],
+ "important": data_for_create["important"],
+ }
+
+ assert response.status_code == status.HTTP_201_CREATED
+ assert response.json() == expected_response
+
+
+@pytest.mark.django_db
+def test_create_personal_notification_rule_invalid_data(personal_notification_rule_public_api_setup):
+ _, user, token, _, _, _ = personal_notification_rule_public_api_setup
+
+ client = APIClient()
+
+ url = reverse("api-public:personal_notification_rules-list")
+ data_for_create = {
+ "user_id": user.public_primary_key,
+ "type": "invalid_type",
+ "position": 1,
+ }
+ response = client.post(url, format="json", HTTP_AUTHORIZATION=token, data=data_for_create)
+
+ assert response.status_code == status.HTTP_400_BAD_REQUEST
+
+
+@pytest.mark.django_db
+def test_delete_personal_notification_rule(personal_notification_rule_public_api_setup):
+ _, user, token, notification_rule_wait, _, _ = personal_notification_rule_public_api_setup
+
+ client = APIClient()
+
+ url = reverse(
+ "api-public:personal_notification_rules-detail", kwargs={"pk": notification_rule_wait.public_primary_key}
+ )
+
+ response = client.delete(url, format="json", HTTP_AUTHORIZATION=token)
+
+ assert response.status_code == status.HTTP_204_NO_CONTENT
diff --git a/engine/apps/public_api/tests/test_ratelimit.py b/engine/apps/public_api/tests/test_ratelimit.py
new file mode 100644
index 0000000000..eb71827c02
--- /dev/null
+++ b/engine/apps/public_api/tests/test_ratelimit.py
@@ -0,0 +1,33 @@
+from unittest.mock import patch
+
+import pytest
+from django.core.cache import cache
+from django.urls import reverse
+from rest_framework import status
+from rest_framework.test import APIClient
+
+
+@patch("apps.public_api.throttlers.user_throttle.UserThrottle.get_throttle_limits")
+@pytest.mark.django_db
+def test_throttling(mocked_throttle_limits, make_organization_and_user_with_token):
+ MAX_REQUESTS = 1
+ PERIOD = 360
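+ # with a limit of one request per window, the second request below must be throttled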
+
+ _, _, token = make_organization_and_user_with_token()
+ cache.clear()
+
+ client = APIClient()
+
+ mocked_throttle_limits.return_value = MAX_REQUESTS, PERIOD
+ url = reverse("api-public:alert_groups-list")
+
+ response = client.get(url, format="json", HTTP_AUTHORIZATION=f"{token}")
+
+ assert response.status_code == status.HTTP_200_OK
+
+ response = client.get(url, format="json", HTTP_AUTHORIZATION=f"{token}")
+
+ assert response.status_code == status.HTTP_429_TOO_MANY_REQUESTS
+
+ # make sure RateLimitHeadersMixin is used
+ assert response.has_header("RateLimit-Reset")
diff --git a/engine/apps/public_api/tests/test_resolution_notes.py b/engine/apps/public_api/tests/test_resolution_notes.py
new file mode 100644
index 0000000000..1f901ea748
--- /dev/null
+++ b/engine/apps/public_api/tests/test_resolution_notes.py
@@ -0,0 +1,221 @@
+import pytest
+from django.urls import reverse
+from rest_framework import status
+from rest_framework.test import APIClient
+
+from apps.alerts.models import ResolutionNote
+
+
+@pytest.mark.django_db
+def test_get_resolution_note(
+ make_organization_and_user_with_token,
+ make_alert_receive_channel,
+ make_alert_group,
+ make_resolution_note,
+):
+
+ organization, user, token = make_organization_and_user_with_token()
+ client = APIClient()
+
+ alert_receive_channel = make_alert_receive_channel(organization)
+ alert_group = make_alert_group(alert_receive_channel)
+
+ resolution_note = make_resolution_note(
+ alert_group=alert_group,
+ source=ResolutionNote.Source.WEB,
+ author=user,
+ )
+
+ url = reverse("api-public:resolution_notes-detail", kwargs={"pk": resolution_note.public_primary_key})
+
+ response = client.get(url, format="json", HTTP_AUTHORIZATION=f"{token}")
+
+ result = {
+ "id": resolution_note.public_primary_key,
+ "alert_group_id": alert_group.public_primary_key,
+ "author": user.public_primary_key,
+ "source": resolution_note.get_source_display(),
+ "created_at": response.data["created_at"],
+ "text": resolution_note.text,
+ }
+
+ assert response.status_code == status.HTTP_200_OK
+ assert response.data == result
+
+
+@pytest.mark.django_db
+def test_create_resolution_note(make_organization_and_user_with_token, make_alert_receive_channel, make_alert_group):
+
+ organization, user, token = make_organization_and_user_with_token()
+ client = APIClient()
+
+ alert_receive_channel = make_alert_receive_channel(organization)
+ alert_group = make_alert_group(alert_receive_channel)
+
+ url = reverse("api-public:resolution_notes-list")
+
+ data = {
+ "alert_group_id": alert_group.public_primary_key,
+ "text": "Test Resolution Note Message",
+ }
+
+ response = client.post(url, data=data, format="json", HTTP_AUTHORIZATION=f"{token}")
+
+ resolution_note = ResolutionNote.objects.get(public_primary_key=response.data["id"])
+
+ result = {
+ "id": resolution_note.public_primary_key,
+ "alert_group_id": alert_group.public_primary_key,
+ "author": user.public_primary_key,
+ "source": resolution_note.get_source_display(),
+ "created_at": response.data["created_at"],
+ "text": data["text"],
+ }
+
+ assert response.status_code == status.HTTP_201_CREATED
+ assert response.data == result
+
+
+@pytest.mark.django_db
+def test_create_resolution_note_invalid_text(
+ make_organization_and_user_with_token,
+ make_alert_receive_channel,
+ make_alert_group,
+):
+
+ organization, user, token = make_organization_and_user_with_token()
+ client = APIClient()
+
+ alert_receive_channel = make_alert_receive_channel(organization)
+ alert_group = make_alert_group(alert_receive_channel)
+
+ url = reverse("api-public:resolution_notes-list")
+
+ data = {
+ "alert_group_id": alert_group.public_primary_key,
+ "text": "",
+ }
+
+ response = client.post(url, data=data, format="json", HTTP_AUTHORIZATION=f"{token}")
+
+ assert response.status_code == status.HTTP_400_BAD_REQUEST
+ assert response.data["text"][0] == "This field may not be blank."
+
+
+@pytest.mark.django_db
+def test_update_resolution_note(
+ make_organization_and_user_with_token,
+ make_alert_receive_channel,
+ make_alert_group,
+ make_resolution_note,
+):
+
+ organization, user, token = make_organization_and_user_with_token()
+ client = APIClient()
+
+ alert_receive_channel = make_alert_receive_channel(organization)
+ alert_group = make_alert_group(alert_receive_channel)
+
+ resolution_note = make_resolution_note(
+ alert_group=alert_group,
+ source=ResolutionNote.Source.WEB,
+ author=user,
+ )
+
+ url = reverse("api-public:resolution_notes-detail", kwargs={"pk": resolution_note.public_primary_key})
+
+ data = {
+ "text": "Test Resolution Note Message",
+ }
+
+ assert resolution_note.text != data["text"]
+
+ response = client.put(url, data=data, format="json", HTTP_AUTHORIZATION=f"{token}")
+
+ result = {
+ "id": resolution_note.public_primary_key,
+ "alert_group_id": alert_group.public_primary_key,
+ "author": user.public_primary_key,
+ "source": resolution_note.get_source_display(),
+ "created_at": response.data["created_at"],
+ "text": data["text"],
+ }
+
+ assert response.status_code == status.HTTP_200_OK
+ resolution_note.refresh_from_db()
+ assert resolution_note.text == result["text"]
+ assert response.data == result
+
+
+@pytest.mark.django_db
+def test_update_resolution_note_invalid_source(
+ make_organization_and_user_with_token,
+ make_alert_receive_channel,
+ make_alert_group,
+ make_resolution_note,
+):
+
+ organization, user, token = make_organization_and_user_with_token()
+ client = APIClient()
+
+ alert_receive_channel = make_alert_receive_channel(organization)
+ alert_group = make_alert_group(alert_receive_channel)
+
+ resolution_note = make_resolution_note(
+ alert_group=alert_group,
+ source=ResolutionNote.Source.SLACK,
+ author=user,
+ )
+
+ url = reverse("api-public:resolution_notes-detail", kwargs={"pk": resolution_note.public_primary_key})
+
+ data = {
+ "text": "Test Resolution Note Message",
+ }
+
+ assert resolution_note.message_text != data["text"]
+
+ response = client.put(url, data=data, format="json", HTTP_AUTHORIZATION=f"{token}")
+
+ assert response.status_code == status.HTTP_400_BAD_REQUEST
+ resolution_note.refresh_from_db()
+ assert resolution_note.message_text != data["text"]
+ assert response.data["detail"] == "Cannot update message with this source type"
+
+
+@pytest.mark.django_db
+def test_delete_resolution_note(
+ make_organization_and_user_with_token,
+ make_alert_receive_channel,
+ make_alert_group,
+ make_resolution_note,
+):
+
+ organization, user, token = make_organization_and_user_with_token()
+ client = APIClient()
+
+ alert_receive_channel = make_alert_receive_channel(organization)
+ alert_group = make_alert_group(alert_receive_channel)
+
+ resolution_note = make_resolution_note(
+ alert_group=alert_group,
+ source=ResolutionNote.Source.WEB,
+ author=user,
+ )
+
+ url = reverse("api-public:resolution_notes-detail", kwargs={"pk": resolution_note.public_primary_key})
+
+ assert resolution_note.deleted_at is None
+
+ response = client.delete(url, format="json", HTTP_AUTHORIZATION=f"{token}")
+
+ assert response.status_code == status.HTTP_204_NO_CONTENT
+
+ resolution_note.refresh_from_db()
+
+ assert resolution_note.deleted_at is not None
+
+ response = client.get(url, format="json", HTTP_AUTHORIZATION=f"{token}")
+
+ assert response.status_code == status.HTTP_404_NOT_FOUND
+ assert response.data["detail"] == "Not found."
diff --git a/engine/apps/public_api/tests/test_routes.py b/engine/apps/public_api/tests/test_routes.py
new file mode 100644
index 0000000000..b5502c8c78
--- /dev/null
+++ b/engine/apps/public_api/tests/test_routes.py
@@ -0,0 +1,226 @@
+import pytest
+from django.urls import reverse
+from rest_framework import status
+from rest_framework.test import APIClient
+
+from apps.alerts.models import ChannelFilter
+
+
+@pytest.fixture()
+def route_public_api_setup(
+ make_organization_and_user_with_token,
+ make_alert_receive_channel,
+ make_escalation_chain,
+ make_channel_filter,
+):
+ organization, user, token = make_organization_and_user_with_token()
+ alert_receive_channel = make_alert_receive_channel(organization)
+ escalation_chain = make_escalation_chain(organization)
+ channel_filter = make_channel_filter(
+ alert_receive_channel,
+ is_default=True,
+ slack_channel_id="TEST_SLACK_ID",
+ escalation_chain=escalation_chain,
+ )
+ return organization, user, token, alert_receive_channel, escalation_chain, channel_filter
+
+
+@pytest.mark.django_db
+def test_get_route(
+ route_public_api_setup,
+):
+ _, _, token, alert_receive_channel, escalation_chain, channel_filter = route_public_api_setup
+
+ client = APIClient()
+
+ url = reverse("api-public:routes-detail", kwargs={"pk": channel_filter.public_primary_key})
+ response = client.get(url, format="json", HTTP_AUTHORIZATION=token)
+
+ expected_response = {
+ "id": channel_filter.public_primary_key,
+ "integration_id": alert_receive_channel.public_primary_key,
+ "escalation_chain_id": escalation_chain.public_primary_key,
+ "routing_regex": channel_filter.filtering_term,
+ "position": channel_filter.order,
+ "is_the_last_route": channel_filter.is_default,
+ "slack": {"channel_id": channel_filter.slack_channel_id},
+ }
+
+ assert response.status_code == status.HTTP_200_OK
+ assert response.json() == expected_response
+
+
+@pytest.mark.django_db
+def test_get_routes_list(
+ route_public_api_setup,
+):
+ _, _, token, alert_receive_channel, escalation_chain, channel_filter = route_public_api_setup
+
+ client = APIClient()
+
+ url = reverse("api-public:routes-list")
+ response = client.get(url, format="json", HTTP_AUTHORIZATION=token)
+
+ expected_response = {
+ "count": 1,
+ "next": None,
+ "previous": None,
+ "results": [
+ {
+ "id": channel_filter.public_primary_key,
+ "integration_id": alert_receive_channel.public_primary_key,
+ "escalation_chain_id": escalation_chain.public_primary_key,
+ "routing_regex": channel_filter.filtering_term,
+ "position": channel_filter.order,
+ "is_the_last_route": channel_filter.is_default,
+ "slack": {"channel_id": channel_filter.slack_channel_id},
+ }
+ ],
+ }
+
+ assert response.status_code == status.HTTP_200_OK
+ assert response.json() == expected_response
+
+
+@pytest.mark.django_db
+def test_get_routes_filter_by_integration_id(
+ route_public_api_setup,
+):
+ _, _, token, alert_receive_channel, escalation_chain, channel_filter = route_public_api_setup
+
+ client = APIClient()
+
+ url = reverse("api-public:routes-list")
+ response = client.get(
+ url + f"?integration_id={alert_receive_channel.public_primary_key}", format="json", HTTP_AUTHORIZATION=token
+ )
+
+ expected_response = {
+ "count": 1,
+ "next": None,
+ "previous": None,
+ "results": [
+ {
+ "id": channel_filter.public_primary_key,
+ "integration_id": alert_receive_channel.public_primary_key,
+ "escalation_chain_id": escalation_chain.public_primary_key,
+ "routing_regex": channel_filter.filtering_term,
+ "position": channel_filter.order,
+ "is_the_last_route": channel_filter.is_default,
+ "slack": {"channel_id": channel_filter.slack_channel_id},
+ }
+ ],
+ }
+
+ assert response.status_code == status.HTTP_200_OK
+ assert response.json() == expected_response
+
+
+@pytest.mark.django_db
+def test_create_route(
+ route_public_api_setup,
+):
+ _, _, token, alert_receive_channel, escalation_chain, _ = route_public_api_setup
+
+ client = APIClient()
+
+ url = reverse("api-public:routes-list")
+ data_for_create = {
+ "integration_id": alert_receive_channel.public_primary_key,
+ "routing_regex": "testreg",
+ "escalation_chain_id": escalation_chain.public_primary_key,
+ }
+ response = client.post(url, format="json", HTTP_AUTHORIZATION=token, data=data_for_create)
+
+ expected_response = {
+ "id": response.data["id"],
+ "integration_id": alert_receive_channel.public_primary_key,
+ "escalation_chain_id": escalation_chain.public_primary_key,
+ "routing_regex": data_for_create["routing_regex"],
+ "position": 0,
+ "is_the_last_route": False,
+ "slack": {"channel_id": None},
+ }
+
+ assert response.status_code == status.HTTP_201_CREATED
+ assert response.json() == expected_response
+
+
+@pytest.mark.django_db
+def test_invalid_route_data(
+ route_public_api_setup,
+):
+ _, _, token, alert_receive_channel, escalation_chain, _ = route_public_api_setup
+
+ client = APIClient()
+
+ url = reverse("api-public:routes-list")
+ data_for_create = {
+ "integration_id": alert_receive_channel.public_primary_key,
+ "routing_regex": None, # routing_regex cannot be null for non-default filters
+ "escalation_chain_id": escalation_chain.public_primary_key,
+ }
+ response = client.post(url, format="json", HTTP_AUTHORIZATION=token, data=data_for_create)
+
+ assert response.status_code == status.HTTP_400_BAD_REQUEST
+
+
+@pytest.mark.django_db
+def test_update_route(
+ route_public_api_setup,
+ make_channel_filter,
+):
+ _, _, token, alert_receive_channel, escalation_chain, _ = route_public_api_setup
+ new_channel_filter = make_channel_filter(
+ alert_receive_channel,
+ is_default=False,
+ filtering_term="testreg",
+ )
+
+ client = APIClient()
+
+ url = reverse("api-public:routes-detail", kwargs={"pk": new_channel_filter.public_primary_key})
+ data_to_update = {
+ "routing_regex": "testreg_updated",
+ "escalation_chain_id": escalation_chain.public_primary_key,
+ }
+
+ assert new_channel_filter.filtering_term != data_to_update["routing_regex"]
+ assert new_channel_filter.escalation_chain != escalation_chain
+
+ response = client.put(url, format="json", HTTP_AUTHORIZATION=token, data=data_to_update)
+
+ expected_response = {
+ "id": new_channel_filter.public_primary_key,
+ "integration_id": alert_receive_channel.public_primary_key,
+ "escalation_chain_id": escalation_chain.public_primary_key,
+ "routing_regex": data_to_update["routing_regex"],
+ "position": new_channel_filter.order,
+ "is_the_last_route": new_channel_filter.is_default,
+ "slack": {"channel_id": new_channel_filter.slack_channel_id},
+ }
+
+ assert response.status_code == status.HTTP_200_OK
+ assert response.json() == expected_response
+
+
+@pytest.mark.django_db
+def test_delete_route(
+ route_public_api_setup,
+ make_channel_filter,
+):
+ _, _, token, alert_receive_channel, _, _ = route_public_api_setup
+ new_channel_filter = make_channel_filter(
+ alert_receive_channel,
+ is_default=False,
+ filtering_term="testreg",
+ )
+
+ client = APIClient()
+
+ url = reverse("api-public:routes-detail", kwargs={"pk": new_channel_filter.public_primary_key})
+ response = client.delete(url, format="json", HTTP_AUTHORIZATION=token)
+
+ assert response.status_code == status.HTTP_204_NO_CONTENT
+ with pytest.raises(ChannelFilter.DoesNotExist):
+ new_channel_filter.refresh_from_db()
diff --git a/engine/apps/public_api/tests/test_schedule_export.py b/engine/apps/public_api/tests/test_schedule_export.py
new file mode 100644
index 0000000000..4d311d9843
--- /dev/null
+++ b/engine/apps/public_api/tests/test_schedule_export.py
@@ -0,0 +1,75 @@
+import pytest
+from django.urls import reverse
+from icalendar import Calendar
+from rest_framework import status
+from rest_framework.test import APIClient
+
+from apps.auth_token.models import ScheduleExportAuthToken, UserScheduleExportAuthToken
+from apps.schedules.models import OnCallScheduleICal
+
+ICAL_URL = "https://calendar.google.com/calendar/ical/c_6i1aprpgaqu89hqeelv7mrj264%40group.calendar.google.com/private-6a995cea6e74dd2cdc5d8c75bee06a2f/basic.ics" # noqa
+
+
+@pytest.mark.django_db
+def test_export_calendar(make_organization_and_user_with_token, make_schedule):
+
+ organization, user, _ = make_organization_and_user_with_token()
+
+ schedule = make_schedule(
+ organization,
+ schedule_class=OnCallScheduleICal,
+ name="test_ical_schedule",
+ ical_url_primary=ICAL_URL,
+ )
+ _, schedule_token = ScheduleExportAuthToken.create_auth_token(
+ user=user, organization=organization, schedule=schedule
+ )
+
+ client = APIClient()
+
+ url = reverse("api-public:schedules-export", kwargs={"pk": schedule.public_primary_key})
+ url = url + "?token={0}".format(schedule_token)
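+ # export endpoints authenticate via the token query parameter instead of the Authorization header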
+
+ response = client.get(url, format="text/calendar")
+
+ assert response.status_code == status.HTTP_200_OK
+ assert response.headers["content-type"] == "text/calendar; charset=utf-8"
+
+ cal = Calendar.from_ical(response.data)
+
+ assert isinstance(cal, Calendar)
+ assert len(cal.subcomponents) == 2
+
+
+@pytest.mark.django_db
+def test_export_user_calendar(make_organization_and_user_with_token, make_schedule):
+
+ organization, user, _ = make_organization_and_user_with_token()
+
+ # make a schedule so that one is available
+ make_schedule(
+ organization,
+ schedule_class=OnCallScheduleICal,
+ name="test_ical_schedule",
+ ical_url_primary=ICAL_URL,
+ )
+
+ _, schedule_token = UserScheduleExportAuthToken.create_auth_token(user=user, organization=organization)
+
+ url = reverse("api-public:users-schedule-export", kwargs={"pk": user.public_primary_key})
+ url = url + "?token={0}".format(schedule_token)
+
+ client = APIClient()
+
+ response = client.get(url, format="text/calendar")
+
+ assert response.status_code == status.HTTP_200_OK
+ assert response.headers["content-type"] == "text/calendar; charset=utf-8"
+
+ cal = Calendar.from_ical(response.data)
+
+ assert isinstance(cal, Calendar)
+ assert cal.get("x-wr-calname") == "On-Call Schedule for {0}".format(user.username)
+ assert cal.get("x-wr-timezone") == "UTC"
+ assert cal.get("calscale") == "GREGORIAN"
+ assert cal.get("prodid") == "//Grafana Labs//Grafana On-Call//"
diff --git a/engine/apps/public_api/tests/test_schedules.py b/engine/apps/public_api/tests/test_schedules.py
new file mode 100644
index 0000000000..13777a1091
--- /dev/null
+++ b/engine/apps/public_api/tests/test_schedules.py
@@ -0,0 +1,526 @@
+from unittest.mock import patch
+
+import pytest
+from django.urls import reverse
+from django.utils import timezone
+from rest_framework import status
+from rest_framework.test import APIClient
+
+from apps.schedules.models import CustomOnCallShift, OnCallSchedule, OnCallScheduleCalendar, OnCallScheduleICal
+
+ICAL_URL = "https://calendar.google.com/calendar/ical/amixr.io_37gttuakhrtr75ano72p69rt78%40group.calendar.google.com/private-1d00a680ba5be7426c3eb3ef1616e26d/basic.ics"  # noqa
+
+
+@pytest.mark.django_db
+def test_get_calendar_schedule(
+ make_organization_and_user_with_token,
+ make_schedule,
+):
+ organization, user, token = make_organization_and_user_with_token()
+ client = APIClient()
+
+ slack_channel_id = "SLACKCHANNELID"
+
+ schedule = make_schedule(
+ organization,
+ schedule_class=OnCallScheduleCalendar,
+ channel=slack_channel_id,
+ )
+
+ url = reverse("api-public:schedules-detail", kwargs={"pk": schedule.public_primary_key})
+
+ response = client.get(url, format="json", HTTP_AUTHORIZATION=f"{token}")
+
+ result = {
+ "id": schedule.public_primary_key,
+ "team_id": None,
+ "name": schedule.name,
+ "type": "calendar",
+ "time_zone": "UTC",
+ "on_call_now": [],
+ "shifts": [],
+ "slack": {
+ "channel_id": "SLACKCHANNELID",
+ "user_group_id": None,
+ },
+ "ical_url_overrides": None,
+ }
+
+ assert response.status_code == status.HTTP_200_OK
+ assert response.json() == result
+
+
+@pytest.mark.django_db
+def test_create_calendar_schedule(make_organization_and_user_with_token):
+
+ organization, user, token = make_organization_and_user_with_token()
+ client = APIClient()
+
+ url = reverse("api-public:schedules-list")
+
+ data = {
+ "team_id": None,
+ "name": "schedule test name",
+ "time_zone": "Europe/Moscow",
+ "type": "calendar",
+ }
+
+ response = client.post(url, data=data, format="json", HTTP_AUTHORIZATION=f"{token}")
+ schedule = OnCallSchedule.objects.get(public_primary_key=response.data["id"])
+
+ result = {
+ "id": schedule.public_primary_key,
+ "team_id": None,
+ "name": schedule.name,
+ "type": "calendar",
+ "time_zone": "Europe/Moscow",
+ "on_call_now": [],
+ "shifts": [],
+ "slack": {
+ "channel_id": None,
+ "user_group_id": None,
+ },
+ "ical_url_overrides": None,
+ }
+
+ assert response.status_code == status.HTTP_201_CREATED
+ assert response.json() == result
+
+
+@pytest.mark.django_db
+def test_update_calendar_schedule(
+ make_organization_and_user_with_token,
+ make_schedule,
+):
+
+ organization, user, token = make_organization_and_user_with_token()
+ client = APIClient()
+
+ slack_channel_id = "SLACKCHANNELID"
+
+ schedule = make_schedule(
+ organization,
+ schedule_class=OnCallScheduleCalendar,
+ channel=slack_channel_id,
+ )
+
+ url = reverse("api-public:schedules-detail", kwargs={"pk": schedule.public_primary_key})
+
+ data = {
+ "name": "RENAMED",
+ "time_zone": "Europe/Moscow",
+ }
+
+ assert schedule.name != data["name"]
+ assert schedule.time_zone != data["time_zone"]
+
+ response = client.put(url, data=data, format="json", HTTP_AUTHORIZATION=f"{token}")
+
+ result = {
+ "id": schedule.public_primary_key,
+ "team_id": None,
+ "name": data["name"],
+ "type": "calendar",
+ "time_zone": data["time_zone"],
+ "on_call_now": [],
+ "shifts": [],
+ "slack": {
+ "channel_id": "SLACKCHANNELID",
+ "user_group_id": None,
+ },
+ "ical_url_overrides": None,
+ }
+
+ assert response.status_code == status.HTTP_200_OK
+ schedule.refresh_from_db()
+ assert schedule.name == data["name"]
+ assert schedule.time_zone == data["time_zone"]
+ assert response.json() == result
+
+
+@pytest.mark.django_db
+def test_update_ical_url_overrides_calendar_schedule(
+ make_organization_and_user_with_token,
+ make_schedule,
+):
+
+ organization, user, token = make_organization_and_user_with_token()
+ client = APIClient()
+
+ slack_channel_id = "SLACKCHANNELID"
+
+ schedule = make_schedule(
+ organization,
+ schedule_class=OnCallScheduleCalendar,
+ channel=slack_channel_id,
+ )
+
+ url = reverse("api-public:schedules-detail", kwargs={"pk": schedule.public_primary_key})
+
+ data = {"ical_url_overrides": ICAL_URL}
+
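+ # the URL validator is patched out so the test does not depend on fetching the remote calendar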
+ with patch("common.api_helpers.utils.validate_ical_url", return_value=ICAL_URL):
+ response = client.put(url, data=data, format="json", HTTP_AUTHORIZATION=f"{token}")
+
+ result = {
+ "id": schedule.public_primary_key,
+ "team_id": None,
+ "name": schedule.name,
+ "type": "calendar",
+ "time_zone": schedule.time_zone,
+ "on_call_now": [],
+ "shifts": [],
+ "slack": {
+ "channel_id": "SLACKCHANNELID",
+ "user_group_id": None,
+ },
+ "ical_url_overrides": ICAL_URL,
+ }
+
+ assert response.status_code == status.HTTP_200_OK
+ assert response.json() == result
+
+
+@pytest.mark.django_db
+def test_update_calendar_schedule_with_custom_event(
+ make_organization_and_user_with_token,
+ make_schedule,
+ make_on_call_shift,
+):
+
+ organization, user, token = make_organization_and_user_with_token()
+ client = APIClient()
+
+ slack_channel_id = "SLACKCHANNELID"
+
+ schedule = make_schedule(
+ organization,
+ schedule_class=OnCallScheduleCalendar,
+ channel=slack_channel_id,
+ )
+ data = {
+ "start": timezone.now().replace(tzinfo=None, microsecond=0),
+ "duration": timezone.timedelta(seconds=10800),
+ }
+ on_call_shift = make_on_call_shift(
+ organization=organization, shift_type=CustomOnCallShift.TYPE_SINGLE_EVENT, **data
+ )
+
+ url = reverse("api-public:schedules-detail", kwargs={"pk": schedule.public_primary_key})
+
+ data = {
+ "shifts": [on_call_shift.public_primary_key],
+ }
+
+ assert len(schedule.custom_on_call_shifts.all()) == 0
+
+ response = client.put(url, data=data, format="json", HTTP_AUTHORIZATION=f"{token}")
+
+ result = {
+ "id": schedule.public_primary_key,
+ "team_id": None,
+ "name": schedule.name,
+ "type": "calendar",
+ "time_zone": schedule.time_zone,
+ "on_call_now": [],
+ "shifts": data["shifts"],
+ "slack": {
+ "channel_id": "SLACKCHANNELID",
+ "user_group_id": None,
+ },
+ "ical_url_overrides": None,
+ }
+
+ assert response.status_code == status.HTTP_200_OK
+ schedule.refresh_from_db()
+ assert len(schedule.custom_on_call_shifts.all()) == 1
+ assert response.json() == result
+
+
+@pytest.mark.django_db
+def test_delete_calendar_schedule(
+ make_organization_and_user_with_token,
+ make_schedule,
+):
+
+ organization, user, token = make_organization_and_user_with_token()
+ client = APIClient()
+
+ schedule = make_schedule(organization, schedule_class=OnCallScheduleCalendar)
+
+ url = reverse("api-public:schedules-detail", kwargs={"pk": schedule.public_primary_key})
+
+ response = client.delete(url, format="json", HTTP_AUTHORIZATION=f"{token}")
+
+ assert response.status_code == status.HTTP_204_NO_CONTENT
+
+ with pytest.raises(OnCallSchedule.DoesNotExist):
+ schedule.refresh_from_db()
+
+
+@pytest.mark.django_db
+def test_get_ical_schedule(
+ make_organization_and_user_with_token,
+ make_schedule,
+):
+ organization, user, token = make_organization_and_user_with_token()
+ client = APIClient()
+
+ slack_channel_id = "SLACKCHANNELID"
+
+ schedule = make_schedule(
+ organization,
+ schedule_class=OnCallScheduleICal,
+ channel=slack_channel_id,
+ ical_url_primary=ICAL_URL,
+ )
+
+ url = reverse("api-public:schedules-detail", kwargs={"pk": schedule.public_primary_key})
+
+ response = client.get(url, format="json", HTTP_AUTHORIZATION=f"{token}")
+
+ result = {
+ "id": schedule.public_primary_key,
+ "team_id": None,
+ "name": schedule.name,
+ "type": "ical",
+ "ical_url_primary": ICAL_URL,
+ "ical_url_overrides": None,
+ "on_call_now": [],
+ "slack": {
+ "channel_id": "SLACKCHANNELID",
+ "user_group_id": None,
+ },
+ }
+
+ assert response.status_code == status.HTTP_200_OK
+ assert response.json() == result
+
+
+@pytest.mark.django_db
+def test_create_ical_schedule(make_organization_and_user_with_token):
+
+ organization, user, token = make_organization_and_user_with_token()
+ client = APIClient()
+
+ url = reverse("api-public:schedules-list")
+ data = {
+ "team_id": None,
+ "name": "schedule test name",
+ "ical_url_primary": ICAL_URL,
+ "type": "ical",
+ }
+
+ with patch(
+ "apps.public_api.serializers.schedules_ical.ScheduleICalSerializer.validate_ical_url_primary",
+ return_value=ICAL_URL,
+ ):
+ response = client.post(url, data=data, format="json", HTTP_AUTHORIZATION=f"{token}")
+ schedule = OnCallSchedule.objects.get(public_primary_key=response.data["id"])
+
+ result = {
+ "id": schedule.public_primary_key,
+ "team_id": None,
+ "name": schedule.name,
+ "type": "ical",
+ "ical_url_primary": ICAL_URL,
+ "ical_url_overrides": None,
+ "on_call_now": [],
+ "slack": {
+ "channel_id": None,
+ "user_group_id": None,
+ },
+ }
+
+ assert response.status_code == status.HTTP_201_CREATED
+ assert response.json() == result
+
+
+@pytest.mark.django_db
+def test_update_ical_schedule(
+ make_organization_and_user_with_token,
+ make_schedule,
+):
+
+ organization, user, token = make_organization_and_user_with_token()
+ client = APIClient()
+
+ slack_channel_id = "SLACKCHANNELID"
+
+ schedule = make_schedule(
+ organization,
+ schedule_class=OnCallScheduleICal,
+ channel=slack_channel_id,
+ ical_url_primary=ICAL_URL,
+ )
+
+ url = reverse("api-public:schedules-detail", kwargs={"pk": schedule.public_primary_key})
+
+ data = {
+ "name": "RENAMED",
+ }
+
+ assert schedule.name != data["name"]
+
+ response = client.put(url, data=data, format="json", HTTP_AUTHORIZATION=f"{token}")
+
+ result = {
+ "id": schedule.public_primary_key,
+ "team_id": None,
+ "name": data["name"],
+ "type": "ical",
+ "ical_url_primary": ICAL_URL,
+ "ical_url_overrides": None,
+ "on_call_now": [],
+ "slack": {
+ "channel_id": "SLACKCHANNELID",
+ "user_group_id": None,
+ },
+ }
+
+ assert response.status_code == status.HTTP_200_OK
+ schedule.refresh_from_db()
+ assert schedule.name == data["name"]
+ assert response.json() == result
+
+
+@pytest.mark.django_db
+def test_delete_ical_schedule(
+ make_organization_and_user_with_token,
+ make_schedule,
+):
+
+ organization, user, token = make_organization_and_user_with_token()
+ client = APIClient()
+
+ schedule = make_schedule(
+ organization,
+ schedule_class=OnCallScheduleICal,
+ ical_url_primary=ICAL_URL,
+ )
+
+ url = reverse("api-public:schedules-detail", kwargs={"pk": schedule.public_primary_key})
+
+ response = client.delete(url, format="json", HTTP_AUTHORIZATION=f"{token}")
+
+ assert response.status_code == status.HTTP_204_NO_CONTENT
+
+ with pytest.raises(OnCallSchedule.DoesNotExist):
+ schedule.refresh_from_db()
+
+
+@pytest.mark.django_db
+def test_get_schedule_list(
+ make_slack_team_identity,
+ make_organization,
+ make_user_for_organization,
+ make_public_api_token,
+ make_slack_user_group,
+ make_schedule,
+):
+ slack_team_identity = make_slack_team_identity()
+ organization = make_organization(slack_team_identity=slack_team_identity)
+ user = make_user_for_organization(organization=organization)
+ _, token = make_public_api_token(user, organization)
+
+ slack_channel_id = "SLACKCHANNELID"
+ user_group_id = "SLACKGROUPID"
+
+ user_group = make_slack_user_group(slack_team_identity, slack_id=user_group_id)
+
+ schedule_calendar = make_schedule(
+ organization, schedule_class=OnCallScheduleCalendar, channel=slack_channel_id, user_group=user_group
+ )
+
+ schedule_ical = make_schedule(
+ organization,
+ schedule_class=OnCallScheduleICal,
+ channel=slack_channel_id,
+ ical_url_primary=ICAL_URL,
+ user_group=user_group,
+ )
+
+ client = APIClient()
+ url = reverse("api-public:schedules-list")
+
+ response = client.get(url, format="json", HTTP_AUTHORIZATION=f"{token}")
+
+ result = {
+ "count": 2,
+ "next": None,
+ "previous": None,
+ "results": [
+ {
+ "id": schedule_calendar.public_primary_key,
+ "team_id": None,
+ "name": schedule_calendar.name,
+ "type": "calendar",
+ "time_zone": "UTC",
+ "on_call_now": [],
+ "shifts": [],
+ "slack": {"channel_id": slack_channel_id, "user_group_id": user_group_id},
+ "ical_url_overrides": None,
+ },
+ {
+ "id": schedule_ical.public_primary_key,
+ "team_id": None,
+ "name": schedule_ical.name,
+ "type": "ical",
+ "ical_url_primary": ICAL_URL,
+ "ical_url_overrides": None,
+ "on_call_now": [],
+ "slack": {"channel_id": slack_channel_id, "user_group_id": user_group_id},
+ },
+ ],
+ }
+
+ assert response.status_code == status.HTTP_200_OK
+ assert response.json() == result
+
+
+@pytest.mark.django_db
+def test_create_schedule_wrong_type(make_organization_and_user_with_token):
+
+ organization, user, token = make_organization_and_user_with_token()
+ client = APIClient()
+
+ url = reverse("api-public:schedules-list")
+ data = {
+ "team_id": None,
+ "name": "schedule test name",
+ "ical_url_primary": ICAL_URL,
+ "type": "wrong_type",
+ }
+
+ with patch(
+ "apps.public_api.serializers.schedules_ical.ScheduleICalSerializer.validate_ical_url_primary",
+ return_value=ICAL_URL,
+ ):
+ response = client.post(url, data=data, format="json", HTTP_AUTHORIZATION=f"{token}")
+
+ assert response.status_code == status.HTTP_400_BAD_REQUEST
+
+
+@pytest.mark.django_db
+def test_create_ical_schedule_without_ical_url(make_organization_and_user_with_token):
+
+ organization, user, token = make_organization_and_user_with_token()
+ client = APIClient()
+
+ url = reverse("api-public:schedules-list")
+ data = {
+ "team_id": None,
+ "name": "schedule test name",
+ "type": "ical",
+ }
+ response = client.post(url, data=data, format="json", HTTP_AUTHORIZATION=f"{token}")
+ assert response.status_code == status.HTTP_400_BAD_REQUEST
+
+ data = {
+ "team_id": None,
+ "name": "schedule test name",
+ "ical_url_primary": None,
+ "type": "ical",
+ }
+ response = client.post(url, data=data, format="json", HTTP_AUTHORIZATION=f"{token}")
+ assert response.status_code == status.HTTP_400_BAD_REQUEST
diff --git a/engine/apps/public_api/tests/test_slack_channels.py b/engine/apps/public_api/tests/test_slack_channels.py
new file mode 100644
index 0000000000..5884dcedcd
--- /dev/null
+++ b/engine/apps/public_api/tests/test_slack_channels.py
@@ -0,0 +1,38 @@
+import pytest
+from django.urls import reverse
+from rest_framework import status
+from rest_framework.test import APIClient
+
+
+@pytest.fixture()
+def slack_channels_public_api_setup(
+ make_organization_and_user_with_slack_identities,
+ make_public_api_token,
+ make_slack_channel,
+):
+ organization, user, slack_team_identity, slack_user_identity = make_organization_and_user_with_slack_identities()
+ _, token = make_public_api_token(user, organization)
+ slack_channel = make_slack_channel(slack_team_identity, slack_id="TEST_SLACK_CHANNEL")
+ return organization, user, token, slack_team_identity, slack_user_identity, slack_channel
+
+
+@pytest.mark.django_db
+def test_get_slack_channels_list(
+ slack_channels_public_api_setup,
+):
+ _, _, token, _, _, slack_channel = slack_channels_public_api_setup
+
+ client = APIClient()
+
+ url = reverse("api-public:slack_channels-list")
+ response = client.get(url, format="json", HTTP_AUTHORIZATION=token)
+
+ expected_response = {
+ "count": 1,
+ "next": None,
+ "previous": None,
+ "results": [{"name": slack_channel.name, "slack_id": slack_channel.slack_id}],
+ }
+
+ assert response.status_code == status.HTTP_200_OK
+ assert response.json() == expected_response
diff --git a/engine/apps/public_api/tests/test_teams.py b/engine/apps/public_api/tests/test_teams.py
new file mode 100644
index 0000000000..946585df2c
--- /dev/null
+++ b/engine/apps/public_api/tests/test_teams.py
@@ -0,0 +1,65 @@
+import pytest
+from django.urls import reverse
+from rest_framework import status
+from rest_framework.test import APIClient
+
+
+@pytest.fixture()
+def team_public_api_setup(
+ make_organization_and_user,
+ make_public_api_token,
+ make_team,
+):
+ organization, user = make_organization_and_user()
+ _, token = make_public_api_token(user, organization)
+ team = make_team(organization)
+ team.users.add(user)
+ return organization, user, token, team
+
+
+@pytest.mark.django_db
+def test_get_teams_list(team_public_api_setup):
+ _, _, token, team = team_public_api_setup
+
+ client = APIClient()
+
+ url = reverse("api-public:teams-list")
+ response = client.get(url, format="json", HTTP_AUTHORIZATION=f"{token}")
+
+ expected_payload = {
+ "count": 1,
+ "next": None,
+ "previous": None,
+ "results": [
+ {
+ "id": team.public_primary_key,
+ "name": team.name,
+ "email": team.email,
+ "avatar_url": team.avatar_url,
+ }
+ ],
+ }
+
+ assert response.status_code == status.HTTP_200_OK
+ assert response.json() == expected_payload
+
+
+@pytest.mark.django_db
+def test_get_team(team_public_api_setup):
+ _, _, token, team = team_public_api_setup
+
+ client = APIClient()
+
+ url = reverse("api-public:teams-detail", kwargs={"pk": team.public_primary_key})
+
+ response = client.get(url, format="json", HTTP_AUTHORIZATION=f"{token}")
+
+ expected_payload = {
+ "id": team.public_primary_key,
+ "name": team.name,
+ "email": team.email,
+ "avatar_url": team.avatar_url,
+ }
+
+ assert response.status_code == status.HTTP_200_OK
+ assert response.json() == expected_payload
diff --git a/engine/apps/public_api/tests/test_user_groups.py b/engine/apps/public_api/tests/test_user_groups.py
new file mode 100644
index 0000000000..055d93c689
--- /dev/null
+++ b/engine/apps/public_api/tests/test_user_groups.py
@@ -0,0 +1,104 @@
+import pytest
+from django.urls import reverse
+from rest_framework import status
+from rest_framework.test import APIClient
+
+
+@pytest.fixture()
+def slack_user_group_public_api_setup(
+ make_organization_and_user_with_slack_identities,
+ make_public_api_token,
+ make_slack_user_group,
+):
+ organization, user, slack_team_identity, slack_user_identity = make_organization_and_user_with_slack_identities()
+ _, token = make_public_api_token(user, organization)
+ slack_user_group = make_slack_user_group(slack_team_identity, slack_id="SLACK_GROUP_ID")
+ return organization, user, token, slack_team_identity, slack_user_identity, slack_user_group
+
+
+@pytest.mark.django_db
+def test_get_user_groups(
+ slack_user_group_public_api_setup,
+):
+ _, _, token, _, _, slack_user_group = slack_user_group_public_api_setup
+
+ client = APIClient()
+
+ url = reverse("api-public:user_groups-list")
+ response = client.get(url, format="json", HTTP_AUTHORIZATION=f"{token}")
+
+ expected_payload = {
+ "count": 1,
+ "next": None,
+ "previous": None,
+ "results": [
+ {
+ "id": slack_user_group.public_primary_key,
+ "type": "slack_based",
+ "slack": {
+ "id": slack_user_group.slack_id,
+ "name": slack_user_group.name,
+ "handle": slack_user_group.handle,
+ },
+ }
+ ],
+ }
+
+ assert response.status_code == status.HTTP_200_OK
+ assert response.json() == expected_payload
+
+
+@pytest.mark.django_db
+def test_get_user_groups_filter_by_handle(
+ slack_user_group_public_api_setup,
+ make_slack_user_group,
+):
+ _, _, token, slack_team_identity, slack_user_identity, slack_user_group_1 = slack_user_group_public_api_setup
+
+ client = APIClient()
+
+ make_slack_user_group(slack_team_identity, slack_id="SLACK_GROUP_ID_2")
+
+ url = reverse("api-public:user_groups-list")
+
+ response = client.get(
+ f"{url}?slack_handle={slack_user_group_1.handle}", format="json", HTTP_AUTHORIZATION=f"{token}"
+ )
+
+ expected_payload = {
+ "count": 1,
+ "next": None,
+ "previous": None,
+ "results": [
+ {
+ "id": slack_user_group_1.public_primary_key,
+ "type": "slack_based",
+ "slack": {
+ "id": slack_user_group_1.slack_id,
+ "name": slack_user_group_1.name,
+ "handle": slack_user_group_1.handle,
+ },
+ }
+ ],
+ }
+
+ assert response.status_code == status.HTTP_200_OK
+ assert response.json() == expected_payload
+
+
+@pytest.mark.django_db
+def test_get_user_groups_filter_by_handle_empty_result(
+ slack_user_group_public_api_setup,
+):
+ _, _, token, slack_team_identity, _, slack_user_group = slack_user_group_public_api_setup
+
+ client = APIClient()
+
+ url = reverse("api-public:user_groups-list")
+
+ response = client.get(f"{url}?slack_handle=NonExistentSlackHandle", format="json", HTTP_AUTHORIZATION=f"{token}")
+
+ expected_payload = {"count": 0, "next": None, "previous": None, "results": []}
+
+ assert response.status_code == status.HTTP_200_OK
+ assert response.data == expected_payload
diff --git a/engine/apps/public_api/tests/test_users.py b/engine/apps/public_api/tests/test_users.py
new file mode 100644
index 0000000000..7208976c8d
--- /dev/null
+++ b/engine/apps/public_api/tests/test_users.py
@@ -0,0 +1,142 @@
+import pytest
+from django.urls import reverse
+from rest_framework import status
+from rest_framework.test import APIClient
+
+
+@pytest.fixture()
+def user_public_api_setup(
+ make_organization_and_user_with_slack_identities,
+ make_public_api_token,
+):
+ organization, user, slack_team_identity, slack_user_identity = make_organization_and_user_with_slack_identities()
+ _, token = make_public_api_token(user, organization)
+ return organization, user, token, slack_team_identity, slack_user_identity
+
+
+@pytest.mark.django_db
+def test_get_user(
+ user_public_api_setup,
+):
+ organization, user, token, slack_team_identity, slack_user_identity = user_public_api_setup
+
+ client = APIClient()
+
+ url = reverse("api-public:users-detail", args=[user.public_primary_key])
+ response = client.get(url, format="json", HTTP_AUTHORIZATION=token)
+
+ expected_response = {
+ "id": user.public_primary_key,
+ "email": user.email,
+ "slack": {"user_id": slack_user_identity.slack_id, "team_id": slack_team_identity.slack_id},
+ "username": user.username,
+ "role": "admin",
+ "is_phone_number_verified": False,
+ }
+
+ assert response.status_code == status.HTTP_200_OK
+ assert response.json() == expected_response
+
+ # get current user
+ url = reverse("api-public:users-detail", args=["current"])
+ response = client.get(url, format="json", HTTP_AUTHORIZATION=token)
+
+ assert response.status_code == status.HTTP_200_OK
+ assert response.json() == expected_response
+
+
+@pytest.mark.django_db
+def test_get_users_list(
+ user_public_api_setup,
+ make_user_for_organization,
+):
+ organization, user_1, token, slack_team_identity, slack_user_identity = user_public_api_setup
+ user_2 = make_user_for_organization(organization)
+
+ client = APIClient()
+
+ url = reverse("api-public:users-list")
+ response = client.get(url, format="json", HTTP_AUTHORIZATION=token)
+
+ expected_response = {
+ "count": 2,
+ "next": None,
+ "previous": None,
+ "results": [
+ {
+ "id": user_1.public_primary_key,
+ "email": user_1.email,
+ "slack": {"user_id": slack_user_identity.slack_id, "team_id": slack_team_identity.slack_id},
+ "username": user_1.username,
+ "role": "admin",
+ "is_phone_number_verified": False,
+ },
+ {
+ "id": user_2.public_primary_key,
+ "email": user_2.email,
+ "slack": None,
+ "username": user_2.username,
+ "role": "admin",
+ "is_phone_number_verified": False,
+ },
+ ],
+ }
+
+ assert response.status_code == status.HTTP_200_OK
+ assert response.json() == expected_response
+
+
+@pytest.mark.django_db
+def test_get_users_list_short(
+ user_public_api_setup,
+ make_user_for_organization,
+):
+ organization, user_1, token, slack_team_identity, slack_user_identity = user_public_api_setup
+ user_2 = make_user_for_organization(organization)
+
+ client = APIClient()
+
+ url = reverse("api-public:users-list")
+ response = client.get(f"{url}?short=true", format="json", HTTP_AUTHORIZATION=token)
+
+ expected_response = {
+ "count": 2,
+ "next": None,
+ "previous": None,
+ "results": [
+ {
+ "id": user_1.public_primary_key,
+ "email": user_1.email,
+ "username": user_1.username,
+ "role": "admin",
+ "is_phone_number_verified": False,
+ },
+ {
+ "id": user_2.public_primary_key,
+ "email": user_2.email,
+ "username": user_2.username,
+ "role": "admin",
+ "is_phone_number_verified": False,
+ },
+ ],
+ }
+
+ assert response.status_code == status.HTTP_200_OK
+ assert response.json() == expected_response
+
+
+@pytest.mark.django_db
+def test_forbidden_access(
+ make_organization_and_user,
+ make_organization_and_user_with_token,
+):
+ _, user = make_organization_and_user()
+ _, _, another_org_token = make_organization_and_user_with_token()
+
+ client = APIClient()
+
+ url = reverse("api-public:users-detail", args=[user.public_primary_key])
+
+ response = client.get(url, format="json", HTTP_AUTHORIZATION=another_org_token)
+
+ assert response.status_code == status.HTTP_404_NOT_FOUND
diff --git a/engine/apps/public_api/throttlers/__init__.py b/engine/apps/public_api/throttlers/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/engine/apps/public_api/throttlers/user_throttle.py b/engine/apps/public_api/throttlers/user_throttle.py
new file mode 100644
index 0000000000..4c46e2599a
--- /dev/null
+++ b/engine/apps/public_api/throttlers/user_throttle.py
@@ -0,0 +1,43 @@
+from rest_framework.throttling import UserRateThrottle
+
+
+class UserThrottle(UserRateThrottle):
+ """
+ __init__ and allow_request are overridden because we want rates such as 300 requests per 5 minutes,
+ but the default rate parser doesn't allow specifying the length of the period (only the unit: m, d, etc.).
+ (See SimpleRateThrottle.parse_rate)
+
+ """
+
+ def __init__(self):
+ self.num_requests, self.duration = self.get_throttle_limits()
+
+ def get_throttle_limits(self):
+ """
+ This method exists to make it easy to override throttle limits in tests.
+ :return: tuple (num_requests, duration_in_seconds)
+ """
+ return 300, 60
+
+ def allow_request(self, request, view):
+ """
+ Implement the check to see if the request should be throttled.
+
+ On success calls `throttle_success`.
+ On failure calls `throttle_failure`.
+ """
+
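+ # per SimpleRateThrottle semantics, no cache key means the request is not subject to throttling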
+ self.key = self.get_cache_key(request, view)
+ if self.key is None:
+ return True
+
+ self.history = self.cache.get(self.key, [])
+ self.now = self.timer()
+
+ # Drop any requests from the history which have now passed the
+ # throttle duration
+ while self.history and self.history[-1] <= self.now - self.duration:
+ self.history.pop()
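+ # if the window is already full, reject the request; otherwise record it and allow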
+ if len(self.history) >= self.num_requests:
+ return self.throttle_failure()
+ return self.throttle_success()
diff --git a/engine/apps/public_api/urls.py b/engine/apps/public_api/urls.py
new file mode 100644
index 0000000000..95fa447af3
--- /dev/null
+++ b/engine/apps/public_api/urls.py
@@ -0,0 +1,33 @@
+from django.urls import include, path
+
+from common.api_helpers.optional_slash_router import OptionalSlashRouter, optional_slash_path
+
+from . import views
+
+app_name = "api-public"
+
+
+router = OptionalSlashRouter()
+
+router.register(r"organizations", views.OrganizationView, basename="organizations")
+router.register(r"users", views.UserView, basename="users")
+router.register(r"integrations", views.IntegrationView, basename="integrations")
+router.register(r"routes", views.ChannelFilterView, basename="routes")
+router.register(r"schedules", views.OnCallScheduleChannelView, basename="schedules")
+router.register(r"escalation_chains", views.EscalationChainView, basename="escalation_chains")
+router.register(r"escalation_policies", views.EscalationPolicyView, basename="escalation_policies")
+router.register(r"alerts", views.AlertView, basename="alerts")
+router.register(r"alert_groups", views.IncidentView, basename="alert_groups")
+router.register(r"slack_channels", views.SlackChannelView, basename="slack_channels")
+router.register(r"personal_notification_rules", views.PersonalNotificationView, basename="personal_notification_rules")
+router.register(r"resolution_notes", views.ResolutionNoteView, basename="resolution_notes")
+router.register(r"actions", views.ActionView, basename="actions")
+router.register(r"user_groups", views.UserGroupView, basename="user_groups")
+router.register(r"on_call_shifts", views.CustomOnCallShiftView, basename="on_call_shifts")
+router.register(r"teams", views.TeamView, basename="teams")
+
+
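+# NOTE: as its name suggests, OptionalSlashRouter resolves these routes with or without a trailing slash.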
+urlpatterns = [
+ path("", include(router.urls)),
+ optional_slash_path("info", views.InfoView.as_view(), name="info"),
+]
diff --git a/engine/apps/public_api/views/__init__.py b/engine/apps/public_api/views/__init__.py
new file mode 100644
index 0000000000..1892d123b1
--- /dev/null
+++ b/engine/apps/public_api/views/__init__.py
@@ -0,0 +1,17 @@
+from .action import ActionView # noqa: F401
+from .alerts import AlertView # noqa: F401
+from .escalation_chains import EscalationChainView # noqa: F401
+from .escalation_policies import EscalationPolicyView # noqa: F401
+from .incidents import IncidentView # noqa: F401
+from .info import InfoView # noqa: F401
+from .integrations import IntegrationView # noqa: F401
+from .on_call_shifts import CustomOnCallShiftView # noqa: F401
+from .organizations import OrganizationView # noqa: F401
+from .personal_notifications import PersonalNotificationView # noqa: F401
+from .resolution_notes import ResolutionNoteView # noqa: F401
+from .routes import ChannelFilterView # noqa: F401
+from .schedules import OnCallScheduleChannelView # noqa: F401
+from .slack_channels import SlackChannelView # noqa: F401
+from .teams import TeamView # noqa: F401
+from .user_groups import UserGroupView # noqa: F401
+from .users import UserView # noqa: F401
diff --git a/engine/apps/public_api/views/action.py b/engine/apps/public_api/views/action.py
new file mode 100644
index 0000000000..bbb6bc732f
--- /dev/null
+++ b/engine/apps/public_api/views/action.py
@@ -0,0 +1,34 @@
+from django_filters import rest_framework as filters
+from rest_framework import mixins
+from rest_framework.permissions import IsAuthenticated
+from rest_framework.viewsets import GenericViewSet
+
+from apps.alerts.models import CustomButton
+from apps.auth_token.auth import ApiTokenAuthentication
+from apps.public_api.serializers.action import ActionSerializer
+from apps.public_api.throttlers.user_throttle import UserThrottle
+from common.api_helpers.filters import ByTeamFilter
+from common.api_helpers.mixins import DemoTokenMixin, RateLimitHeadersMixin
+from common.api_helpers.paginators import FiftyPageSizePaginator
+
+
+class ActionView(RateLimitHeadersMixin, DemoTokenMixin, mixins.ListModelMixin, GenericViewSet):
+ authentication_classes = (ApiTokenAuthentication,)
+ permission_classes = (IsAuthenticated,)
+ pagination_class = FiftyPageSizePaginator
+ throttle_classes = [UserThrottle]
+
+ model = CustomButton
+ serializer_class = ActionSerializer
+
+ filter_backends = (filters.DjangoFilterBackend,)
+ filterset_class = ByTeamFilter
+
+ def get_queryset(self):
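+ # optionally narrow the queryset by exact action name (?name=... query parameter)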
+ action_name = self.request.query_params.get("name", None)
+ queryset = CustomButton.objects.filter(organization=self.request.auth.organization)
+
+ if action_name:
+ queryset = queryset.filter(name=action_name)
+
+ return queryset
diff --git a/engine/apps/public_api/views/alerts.py b/engine/apps/public_api/views/alerts.py
new file mode 100644
index 0000000000..56fe651e48
--- /dev/null
+++ b/engine/apps/public_api/views/alerts.py
@@ -0,0 +1,44 @@
+from django.db.models import CharField
+from django.db.models.functions import Cast
+from rest_framework import mixins
+from rest_framework.permissions import IsAuthenticated
+from rest_framework.viewsets import GenericViewSet
+
+from apps.alerts.models import Alert
+from apps.auth_token.auth import ApiTokenAuthentication
+from apps.public_api import constants as public_api_constants
+from apps.public_api.serializers.alerts import AlertSerializer
+from apps.public_api.throttlers.user_throttle import UserThrottle
+from common.api_helpers.mixins import DemoTokenMixin, RateLimitHeadersMixin
+from common.api_helpers.paginators import FiftyPageSizePaginator
+
+
+class AlertView(RateLimitHeadersMixin, DemoTokenMixin, mixins.ListModelMixin, GenericViewSet):
+ authentication_classes = (ApiTokenAuthentication,)
+ permission_classes = (IsAuthenticated,)
+
+ throttle_classes = [UserThrottle]
+
+ model = Alert
+ serializer_class = AlertSerializer
+ pagination_class = FiftyPageSizePaginator
+
+ demo_default_id = public_api_constants.DEMO_ALERT_IDS[0]
+
+ def get_queryset(self):
+ alert_group_id = self.request.query_params.get("alert_group_id", None)
+ search = self.request.query_params.get("search", None)
+
+ queryset = Alert.objects.filter(group__channel__organization=self.request.auth.organization)
+
+ if alert_group_id:
+ queryset = queryset.filter(group__public_primary_key=alert_group_id)
+
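+ # cast the JSON payload to text so a case-insensitive substring search can be applied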
+ if search:
+ queryset = queryset.annotate(
+ raw_request_data_str=Cast("raw_request_data", output_field=CharField())
+ ).filter(raw_request_data_str__icontains=search)
+
+ queryset = self.serializer_class.setup_eager_loading(queryset)
+
+ return queryset
diff --git a/engine/apps/public_api/views/escalation_chains.py b/engine/apps/public_api/views/escalation_chains.py
new file mode 100644
index 0000000000..515e80177a
--- /dev/null
+++ b/engine/apps/public_api/views/escalation_chains.py
@@ -0,0 +1,85 @@
+from django_filters import rest_framework as filters
+from rest_framework.exceptions import NotFound
+from rest_framework.permissions import IsAuthenticated
+from rest_framework.viewsets import ModelViewSet
+
+from apps.alerts.models import EscalationChain
+from apps.auth_token.auth import ApiTokenAuthentication
+from apps.public_api.serializers import EscalationChainSerializer
+from apps.public_api.serializers.escalation_chains import EscalationChainUpdateSerializer
+from apps.public_api.throttlers.user_throttle import UserThrottle
+from apps.user_management.organization_log_creator import OrganizationLogType, create_organization_log
+from common.api_helpers.filters import ByTeamFilter
+from common.api_helpers.mixins import RateLimitHeadersMixin, UpdateSerializerMixin
+from common.api_helpers.paginators import FiftyPageSizePaginator
+
+
+class EscalationChainView(RateLimitHeadersMixin, UpdateSerializerMixin, ModelViewSet):
+ authentication_classes = (ApiTokenAuthentication,)
+ permission_classes = (IsAuthenticated,)
+
+ throttle_classes = [UserThrottle]
+
+ model = EscalationChain
+ serializer_class = EscalationChainSerializer
+ update_serializer_class = EscalationChainUpdateSerializer
+
+ pagination_class = FiftyPageSizePaginator
+
+ filter_backends = (filters.DjangoFilterBackend,)
+ filterset_class = ByTeamFilter
+
+ def get_queryset(self):
+ queryset = self.request.auth.organization.escalation_chains.all()
+
+ name = self.request.query_params.get("name")
+ if name is not None:
+ queryset = queryset.filter(name=name)
+
+ return queryset
+
+ def get_object(self):
+ public_primary_key = self.kwargs["pk"]
+
+ try:
+ return self.request.auth.organization.escalation_chains.get(public_primary_key=public_primary_key)
+ except EscalationChain.DoesNotExist:
+ raise NotFound
+
+ def perform_create(self, serializer):
+ serializer.save()
+
+ instance = serializer.instance
+ description = f"Escalation chain {instance.name} was created"
+ create_organization_log(
+ instance.organization,
+ self.request.user,
+ OrganizationLogType.TYPE_ESCALATION_CHAIN_CREATED,
+ description,
+ )
+
+ def perform_destroy(self, instance):
+ instance.delete()
+
+ description = f"Escalation chain {instance.name} was deleted"
+ create_organization_log(
+ instance.organization,
+ self.request.user,
+ OrganizationLogType.TYPE_ESCALATION_CHAIN_DELETED,
+ description,
+ )
+
+ def perform_update(self, serializer):
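+ # capture before/after representations so the change is recorded in the organization log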
+ instance = serializer.instance
+ old_state = instance.repr_settings_for_client_side_logging
+
+ serializer.save()
+
+ new_state = instance.repr_settings_for_client_side_logging
+ description = f"Escalation chain {instance.name} was changed from:\n{old_state}\nto:\n{new_state}"
+ create_organization_log(
+ instance.organization,
+ self.request.user,
+ OrganizationLogType.TYPE_ESCALATION_CHAIN_CHANGED,
+ description,
+ )
diff --git a/engine/apps/public_api/views/escalation_policies.py b/engine/apps/public_api/views/escalation_policies.py
new file mode 100644
index 0000000000..fc285588f8
--- /dev/null
+++ b/engine/apps/public_api/views/escalation_policies.py
@@ -0,0 +1,88 @@
+from django.db.models import Q
+from rest_framework.exceptions import NotFound
+from rest_framework.permissions import IsAuthenticated
+from rest_framework.viewsets import ModelViewSet
+
+from apps.alerts.models import EscalationPolicy
+from apps.auth_token.auth import ApiTokenAuthentication
+from apps.public_api import constants as public_api_constants
+from apps.public_api.serializers import EscalationPolicySerializer, EscalationPolicyUpdateSerializer
+from apps.public_api.throttlers.user_throttle import UserThrottle
+from apps.user_management.organization_log_creator import OrganizationLogType, create_organization_log
+from common.api_helpers.mixins import DemoTokenMixin, RateLimitHeadersMixin, UpdateSerializerMixin
+from common.api_helpers.paginators import FiftyPageSizePaginator
+
+
+class EscalationPolicyView(RateLimitHeadersMixin, DemoTokenMixin, UpdateSerializerMixin, ModelViewSet):
+ authentication_classes = (ApiTokenAuthentication,)
+ permission_classes = (IsAuthenticated,)
+
+ throttle_classes = [UserThrottle]
+
+ model = EscalationPolicy
+ serializer_class = EscalationPolicySerializer
+ update_serializer_class = EscalationPolicyUpdateSerializer
+
+ pagination_class = FiftyPageSizePaginator
+
+ demo_default_id = public_api_constants.DEMO_ESCALATION_POLICY_ID_1
+
+ def get_queryset(self):
+ escalation_chain_id = self.request.query_params.get("escalation_chain_id", None)
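+ # expose only steps that are valid in the public API, plus steps that are not yet configured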
+ queryset = EscalationPolicy.objects.filter(
+ Q(escalation_chain__organization=self.request.auth.organization),
+ Q(step__in=EscalationPolicy.PUBLIC_STEP_CHOICES_MAP) | Q(step__isnull=True),
+ )
+ queryset = self.serializer_class.setup_eager_loading(queryset)
+
+ if escalation_chain_id:
+ queryset = queryset.filter(escalation_chain__public_primary_key=escalation_chain_id)
+
+ return queryset.order_by("escalation_chain", "order")
+
+ def get_object(self):
+ public_primary_key = self.kwargs["pk"]
+
+ try:
+ return EscalationPolicy.objects.filter(
+ Q(escalation_chain__organization=self.request.auth.organization),
+ Q(step__in=EscalationPolicy.PUBLIC_STEP_CHOICES_MAP) | Q(step__isnull=True),
+ ).get(public_primary_key=public_primary_key)
+ except EscalationPolicy.DoesNotExist:
+ raise NotFound
+
+ def perform_create(self, serializer):
+ serializer.save()
+ instance = serializer.instance
+ organization = self.request.auth.organization
+ user = self.request.user
+ escalation_chain = instance.escalation_chain
+ description = (
+ f"Escalation step '{instance.step_type_verbal}' with order {instance.order} was created for "
+ f"escalation chain '{escalation_chain.name}'"
+ )
+ create_organization_log(organization, user, OrganizationLogType.TYPE_ESCALATION_STEP_CREATED, description)
+
+ def perform_update(self, serializer):
+ organization = self.request.auth.organization
+ user = self.request.user
+ old_state = serializer.instance.repr_settings_for_client_side_logging
+ serializer.save()
+ new_state = serializer.instance.repr_settings_for_client_side_logging
+ escalation_chain = serializer.instance.escalation_chain
+ description = (
+ f"Settings for escalation step of escalation chain '{escalation_chain.name}' was changed "
+ f"from:\n{old_state}\nto:\n{new_state}"
+ )
+ create_organization_log(organization, user, OrganizationLogType.TYPE_ESCALATION_STEP_CHANGED, description)
+
+ def perform_destroy(self, instance):
+ organization = self.request.auth.organization
+ user = self.request.user
+ escalation_chain = instance.escalation_chain
+ description = (
+ f"Escalation step '{instance.step_type_verbal}' with order {instance.order} of "
+ f"escalation chain '{escalation_chain.name}' was deleted"
+ )
+ create_organization_log(organization, user, OrganizationLogType.TYPE_ESCALATION_STEP_DELETED, description)
+ instance.delete()
diff --git a/engine/apps/public_api/views/incidents.py b/engine/apps/public_api/views/incidents.py
new file mode 100644
index 0000000000..1bfe830ee7
--- /dev/null
+++ b/engine/apps/public_api/views/incidents.py
@@ -0,0 +1,98 @@
+from django_filters import rest_framework as filters
+from rest_framework import mixins, status
+from rest_framework.exceptions import NotFound
+from rest_framework.permissions import IsAuthenticated
+from rest_framework.response import Response
+from rest_framework.viewsets import GenericViewSet
+
+from apps.alerts.models import AlertGroup
+from apps.alerts.tasks import delete_alert_group, wipe
+from apps.auth_token.auth import ApiTokenAuthentication
+from apps.public_api import constants as public_api_constants
+from apps.public_api.constants import VALID_DATE_FOR_DELETE_INCIDENT
+from apps.public_api.helpers import is_valid_group_creation_date, team_has_slack_token_for_deleting
+from apps.public_api.serializers import IncidentSerializer
+from apps.public_api.throttlers.user_throttle import UserThrottle
+from common.api_helpers.exceptions import BadRequest
+from common.api_helpers.filters import ByTeamModelFieldFilterMixin, get_team_queryset
+from common.api_helpers.mixins import DemoTokenMixin, RateLimitHeadersMixin
+from common.api_helpers.paginators import FiftyPageSizePaginator
+
+
+class IncidentByTeamFilter(ByTeamModelFieldFilterMixin, filters.FilterSet):
+ team = filters.ModelChoiceFilter(
+ field_name="channel__team",
+ queryset=get_team_queryset,
+ to_field_name="public_primary_key",
+ null_label="noteam",
+ null_value="null",
+ method=ByTeamModelFieldFilterMixin.filter_model_field_with_single_value.__name__,
+ )
+
+
+class IncidentView(
+ RateLimitHeadersMixin, DemoTokenMixin, mixins.ListModelMixin, mixins.DestroyModelMixin, GenericViewSet
+):
+ authentication_classes = (ApiTokenAuthentication,)
+ permission_classes = (IsAuthenticated,)
+
+ throttle_classes = [UserThrottle]
+
+ model = AlertGroup
+ serializer_class = IncidentSerializer
+ pagination_class = FiftyPageSizePaginator
+
+ demo_default_id = public_api_constants.DEMO_INCIDENT_ID
+
+ filter_backends = (filters.DjangoFilterBackend,)
+ filterset_class = IncidentByTeamFilter
+
+ def get_queryset(self):
+ route_id = self.request.query_params.get("route_id", None)
+ integration_id = self.request.query_params.get("integration_id", None)
+
+ queryset = AlertGroup.unarchived_objects.filter(
+ channel__organization=self.request.auth.organization,
+ ).order_by("-started_at")
+
+ if route_id:
+ queryset = queryset.filter(channel_filter__public_primary_key=route_id)
+ if integration_id:
+ queryset = queryset.filter(channel__public_primary_key=integration_id)
+
+ queryset = self.serializer_class.setup_eager_loading(queryset)
+
+ return queryset
+
+ def get_object(self):
+ public_primary_key = self.kwargs["pk"]
+
+ try:
+ return AlertGroup.unarchived_objects.filter(
+ channel__organization=self.request.auth.organization,
+ ).get(public_primary_key=public_primary_key)
+ except AlertGroup.DoesNotExist:
+ raise NotFound
+
+ def destroy(self, request, *args, **kwargs):
+ instance = self.get_object()
+ if not isinstance(request.data, dict):
+ return Response(data="A dict with a `mode` key is expected", status=status.HTTP_400_BAD_REQUEST)
+ mode = request.data.get("mode")
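+        # "delete" fully removes the alert group via an async task; any other mode (including none) wipes it instead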
+ if mode == "delete":
+ if not team_has_slack_token_for_deleting(instance):
+ raise BadRequest(
+ detail="Your OnCall Bot in Slack is outdated. Please reinstall OnCall Bot and try again."
+ )
+ elif not is_valid_group_creation_date(instance):
+ raise BadRequest(
+ detail=f"We are unable to “delete” old alert_groups (created before "
+ f"{VALID_DATE_FOR_DELETE_INCIDENT.strftime('%d %B %Y')}) using API. "
+ f"Please use “wipe” mode or contact help. Sorry for that!"
+ )
+ else:
+ delete_alert_group.apply_async((instance.pk, request.user.pk))
+ else:
+ wipe.apply_async((instance.pk, request.user.pk))
+
+ return Response(status=status.HTTP_204_NO_CONTENT)
diff --git a/engine/apps/public_api/views/info.py b/engine/apps/public_api/views/info.py
new file mode 100644
index 0000000000..f96491812b
--- /dev/null
+++ b/engine/apps/public_api/views/info.py
@@ -0,0 +1,17 @@
+from rest_framework.permissions import IsAuthenticated
+from rest_framework.response import Response
+from rest_framework.views import APIView
+
+from apps.auth_token.auth import ApiTokenAuthentication
+from apps.public_api.throttlers.user_throttle import UserThrottle
+
+
+class InfoView(APIView):
+ authentication_classes = (ApiTokenAuthentication,)
+ permission_classes = (IsAuthenticated,)
+
+ throttle_classes = [UserThrottle]
+
+ def get(self, request):
+ response = {"url": self.request.auth.organization.grafana_url}
+ return Response(response)
diff --git a/engine/apps/public_api/views/integrations.py b/engine/apps/public_api/views/integrations.py
new file mode 100644
index 0000000000..8aa4784eaa
--- /dev/null
+++ b/engine/apps/public_api/views/integrations.py
@@ -0,0 +1,82 @@
+from django.db.models import Count
+from django_filters import rest_framework as filters
+from rest_framework.exceptions import NotFound
+from rest_framework.permissions import IsAuthenticated
+from rest_framework.viewsets import ModelViewSet
+
+from apps.alerts.models import AlertReceiveChannel
+from apps.auth_token.auth import ApiTokenAuthentication
+from apps.public_api import constants as public_api_constants
+from apps.public_api.serializers import IntegrationSerializer, IntegrationUpdateSerializer
+from apps.public_api.throttlers.user_throttle import UserThrottle
+from apps.user_management.organization_log_creator import OrganizationLogType, create_organization_log
+from common.api_helpers.filters import ByTeamFilter
+from common.api_helpers.mixins import (
+ DemoTokenMixin,
+ FilterSerializerMixin,
+ RateLimitHeadersMixin,
+ UpdateSerializerMixin,
+)
+from common.api_helpers.paginators import FiftyPageSizePaginator
+
+from .maintaiable_object_mixin import MaintainableObjectMixin
+
+
+class IntegrationView(
+ RateLimitHeadersMixin,
+ DemoTokenMixin,
+ FilterSerializerMixin,
+ UpdateSerializerMixin,
+ MaintainableObjectMixin,
+ ModelViewSet,
+):
+ authentication_classes = (ApiTokenAuthentication,)
+ permission_classes = (IsAuthenticated,)
+
+ throttle_classes = [UserThrottle]
+
+ model = AlertReceiveChannel
+ serializer_class = IntegrationSerializer
+ update_serializer_class = IntegrationUpdateSerializer
+
+ pagination_class = FiftyPageSizePaginator
+
+ demo_default_id = public_api_constants.DEMO_INTEGRATION_ID
+
+ filter_backends = (filters.DjangoFilterBackend,)
+ filterset_class = ByTeamFilter
+
+ def get_queryset(self):
+ queryset = AlertReceiveChannel.objects.filter(organization=self.request.auth.organization).order_by(
+ "created_at"
+ )
+ queryset = self.serializer_class.setup_eager_loading(queryset)
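+        # precompute distinct alert group counts in a single query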
+ queryset = queryset.annotate(alert_groups_count_annotated=Count("alert_groups", distinct=True))
+ return queryset
+
+ def get_object(self):
+ public_primary_key = self.kwargs["pk"]
+
+ try:
+ return self.get_queryset().get(public_primary_key=public_primary_key)
+ except AlertReceiveChannel.DoesNotExist:
+ raise NotFound
+
+ def perform_update(self, serializer):
+ old_state = serializer.instance.repr_settings_for_client_side_logging
+ serializer.save()
+ new_state = serializer.instance.repr_settings_for_client_side_logging
+ description = f"Integration settings was changed from:\n{old_state}\nto:\n{new_state}"
+ create_organization_log(
+ serializer.instance.organization,
+ self.request.user,
+ OrganizationLogType.TYPE_INTEGRATION_CHANGED,
+ description,
+ )
+
+ def perform_destroy(self, instance):
+ organization = instance.organization
+ user = self.request.user
+ description = f"Integration {instance.verbal_name} was deleted"
+ create_organization_log(organization, user, OrganizationLogType.TYPE_INTEGRATION_DELETED, description)
+ instance.delete()
diff --git a/engine/apps/public_api/views/maintaiable_object_mixin.py b/engine/apps/public_api/views/maintaiable_object_mixin.py
new file mode 100644
index 0000000000..b99985f83b
--- /dev/null
+++ b/engine/apps/public_api/views/maintaiable_object_mixin.py
@@ -0,0 +1,48 @@
+from rest_framework import viewsets
+from rest_framework.decorators import action
+from rest_framework.response import Response
+
+from apps.alerts.models import MaintainableObject
+from common.api_helpers.exceptions import BadRequest
+from common.exceptions import MaintenanceCouldNotBeStartedError
+
+
+class MaintainableObjectMixin(viewsets.ViewSet):
+ """
+    Intended to be mixed into a ModelViewSet whose model inherits from MaintainableObject.
+ """
+
+ @action(detail=True, methods=["post"])
+ def maintenance_start(self, request, pk) -> Response:
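+        # Illustrative payload (assuming a mode name from MAINTENANCE_MODE_CHOICES): {"mode": "maintenance", "duration": 3600}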
+ mode = str(request.data.get("mode", None)).lower()
+ duration = request.data.get("duration", None)
+
+ if mode not in [
+ str(MaintainableObject.DEBUG_MAINTENANCE_KEY).lower(),
+ str(MaintainableObject.MAINTENANCE_KEY).lower(),
+ ]:
+ raise BadRequest(detail={"mode": ["Unknown mode"]})
+ else:
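+            # map the lowercased verbal choice name back to its stored value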
+ mode = {str(x[1]).lower(): x[0] for x in MaintainableObject.MAINTENANCE_MODE_CHOICES}[mode]
+
+ try:
+            duration = int(duration)  # we intentionally allow flexible, arbitrary durations
+ except (ValueError, TypeError):
+ raise BadRequest(detail={"duration": ["Invalid duration"]})
+
+ instance = self.get_object()
+ try:
+ instance.start_maintenance(mode, duration, request.user)
+ except MaintenanceCouldNotBeStartedError as e:
+ raise BadRequest(detail=str(e))
+
+ return self.retrieve(request, pk)
+
+ @action(detail=True, methods=["post"])
+ def maintenance_stop(self, request, pk) -> Response:
+ instance = self.get_object()
+ user = request.user
+ instance.force_disable_maintenance(user)
+
+ return self.retrieve(request, pk)
diff --git a/engine/apps/public_api/views/on_call_shifts.py b/engine/apps/public_api/views/on_call_shifts.py
new file mode 100644
index 0000000000..5f366f1943
--- /dev/null
+++ b/engine/apps/public_api/views/on_call_shifts.py
@@ -0,0 +1,82 @@
+from django_filters.rest_framework import DjangoFilterBackend
+from rest_framework.exceptions import NotFound
+from rest_framework.permissions import IsAuthenticated
+from rest_framework.viewsets import ModelViewSet
+
+from apps.auth_token.auth import ApiTokenAuthentication
+from apps.public_api import constants as public_api_constants
+from apps.public_api.serializers import CustomOnCallShiftSerializer, CustomOnCallShiftUpdateSerializer
+from apps.public_api.throttlers.user_throttle import UserThrottle
+from apps.schedules.models import CustomOnCallShift
+from apps.user_management.organization_log_creator import OrganizationLogType, create_organization_log
+from common.api_helpers.filters import ByTeamFilter
+from common.api_helpers.mixins import DemoTokenMixin, RateLimitHeadersMixin, UpdateSerializerMixin
+from common.api_helpers.paginators import FiftyPageSizePaginator
+
+
+class CustomOnCallShiftView(RateLimitHeadersMixin, DemoTokenMixin, UpdateSerializerMixin, ModelViewSet):
+ authentication_classes = (ApiTokenAuthentication,)
+ permission_classes = (IsAuthenticated,)
+
+ throttle_classes = [UserThrottle]
+
+ model = CustomOnCallShift
+ serializer_class = CustomOnCallShiftSerializer
+ update_serializer_class = CustomOnCallShiftUpdateSerializer
+
+ pagination_class = FiftyPageSizePaginator
+
+ filter_backends = [DjangoFilterBackend]
+ filterset_class = ByTeamFilter
+
+ demo_default_id = public_api_constants.DEMO_ON_CALL_SHIFT_ID_1
+
+ def get_queryset(self):
+ name = self.request.query_params.get("name", None)
+ schedule_id = self.request.query_params.get("schedule_id", None)
+
+ queryset = CustomOnCallShift.objects.filter(organization=self.request.auth.organization)
+
+ if schedule_id:
+ queryset = queryset.filter(schedules__public_primary_key=schedule_id)
+ if name:
+ queryset = queryset.filter(name=name)
+ return queryset.order_by("schedules")
+
+ def get_object(self):
+ public_primary_key = self.kwargs["pk"]
+
+ try:
+ return CustomOnCallShift.objects.filter(
+ organization=self.request.auth.organization,
+ ).get(public_primary_key=public_primary_key)
+ except CustomOnCallShift.DoesNotExist:
+ raise NotFound
+
+ def perform_create(self, serializer):
+ serializer.save()
+ instance = serializer.instance
+ organization = self.request.auth.organization
+ user = self.request.user
+        description = (
+            f"Custom on-call shift with params: {instance.repr_settings_for_client_side_logging} was created"
+        )
+ create_organization_log(organization, user, OrganizationLogType.TYPE_ON_CALL_SHIFT_CREATED, description)
+
+ def perform_update(self, serializer):
+ organization = self.request.auth.organization
+ user = self.request.user
+ old_state = serializer.instance.repr_settings_for_client_side_logging
+ serializer.save()
+ new_state = serializer.instance.repr_settings_for_client_side_logging
+ description = f"Settings of custom on-call shift was changed " f"from:\n{old_state}\nto:\n{new_state}"
+ create_organization_log(organization, user, OrganizationLogType.TYPE_ON_CALL_SHIFT_CHANGED, description)
+
+ def perform_destroy(self, instance):
+ organization = self.request.auth.organization
+ user = self.request.user
+        description = (
+            f"Custom on-call shift with params: {instance.repr_settings_for_client_side_logging} was deleted"
+        )
+ create_organization_log(organization, user, OrganizationLogType.TYPE_ON_CALL_SHIFT_DELETED, description)
+ instance.delete()
diff --git a/engine/apps/public_api/views/organizations.py b/engine/apps/public_api/views/organizations.py
new file mode 100644
index 0000000000..d3bce01e35
--- /dev/null
+++ b/engine/apps/public_api/views/organizations.py
@@ -0,0 +1,42 @@
+from rest_framework.permissions import IsAuthenticated
+from rest_framework.settings import api_settings
+from rest_framework.viewsets import ReadOnlyModelViewSet
+
+from apps.auth_token.auth import ApiTokenAuthentication
+from apps.public_api import constants as public_api_constants
+from apps.public_api.serializers import OrganizationSerializer
+from apps.public_api.throttlers.user_throttle import UserThrottle
+from apps.user_management.models import Organization
+from common.api_helpers.mixins import DemoTokenMixin, RateLimitHeadersMixin
+from common.api_helpers.paginators import TwentyFivePageSizePaginator
+
+
+class OrganizationView(
+ RateLimitHeadersMixin,
+ DemoTokenMixin,
+ ReadOnlyModelViewSet,
+):
+ authentication_classes = (ApiTokenAuthentication,)
+ permission_classes = (IsAuthenticated,)
+
+ throttle_classes = [UserThrottle]
+
+ model = Organization
+ serializer_class = OrganizationSerializer
+
+ pagination_class = TwentyFivePageSizePaginator
+
+ demo_default_id = public_api_constants.DEMO_ORGANIZATION_ID
+
+ def get_queryset(self):
+        # This is a hack to build a queryset from a single object, in case we return multiple organizations in the future.
+ return Organization.objects.filter(pk=self.request.auth.organization.pk)
+
+ def get_object(self):
+ return self.request.auth.organization
+
+ def get_success_headers(self, data):
+ try:
+ return {"Location": str(data[api_settings.URL_FIELD_NAME])}
+ except (TypeError, KeyError):
+ return {}
diff --git a/engine/apps/public_api/views/personal_notifications.py b/engine/apps/public_api/views/personal_notifications.py
new file mode 100644
index 0000000000..0b3e0b0adb
--- /dev/null
+++ b/engine/apps/public_api/views/personal_notifications.py
@@ -0,0 +1,119 @@
+from rest_framework import status
+from rest_framework.exceptions import NotFound
+from rest_framework.permissions import IsAuthenticated
+from rest_framework.response import Response
+from rest_framework.viewsets import ModelViewSet
+
+from apps.auth_token.auth import ApiTokenAuthentication
+from apps.base.models import UserNotificationPolicy
+from apps.public_api import constants as public_api_constants
+from apps.public_api.serializers import PersonalNotificationRuleSerializer, PersonalNotificationRuleUpdateSerializer
+from apps.public_api.throttlers.user_throttle import UserThrottle
+from apps.user_management.models import User
+from apps.user_management.organization_log_creator import OrganizationLogType, create_organization_log
+from common.api_helpers.exceptions import BadRequest
+from common.api_helpers.mixins import DemoTokenMixin, RateLimitHeadersMixin, UpdateSerializerMixin
+from common.api_helpers.paginators import FiftyPageSizePaginator
+
+
+class PersonalNotificationView(RateLimitHeadersMixin, DemoTokenMixin, UpdateSerializerMixin, ModelViewSet):
+ authentication_classes = (ApiTokenAuthentication,)
+ permission_classes = (IsAuthenticated,)
+
+ throttle_classes = [UserThrottle]
+
+ model = UserNotificationPolicy
+ serializer_class = PersonalNotificationRuleSerializer
+ update_serializer_class = PersonalNotificationRuleUpdateSerializer
+
+ pagination_class = FiftyPageSizePaginator
+
+ demo_default_id = public_api_constants.DEMO_PERSONAL_NOTIFICATION_ID_1
+
+ def get_queryset(self):
+ user_id = self.request.query_params.get("user_id", None)
+ important = self.request.query_params.get("important", None)
+ organization = self.request.auth.organization
+ if user_id is not None:
+ if user_id != self.request.user.public_primary_key:
+ try:
+ User.objects.get(
+ public_primary_key=user_id,
+ organization=organization,
+ )
+ except User.DoesNotExist:
+ raise BadRequest(detail="User not found.")
+ queryset = UserNotificationPolicy.objects.filter(
+ user__public_primary_key=user_id,
+ user__organization=organization,
+ )
+ else:
+ queryset = UserNotificationPolicy.objects.filter(user__organization=organization).distinct()
+ if important is not None:
+ if important == "true":
+ queryset = queryset.filter(important=True)
+ elif important == "false":
+ queryset = queryset.filter(important=False)
+ else:
+ raise BadRequest(detail="Important is not bool")
+
+ queryset = self.serializer_class.setup_eager_loading(queryset)
+
+ return queryset.order_by("user", "important", "order")
+
+ def get_object(self):
+ public_primary_key = self.kwargs["pk"]
+ queryset = self.filter_queryset(self.get_queryset())
+ try:
+ return queryset.get(public_primary_key=public_primary_key)
+ except UserNotificationPolicy.DoesNotExist:
+ raise NotFound
+
+ def destroy(self, request, *args, **kwargs):
+ instance = self.get_object()
+ self.perform_destroy(instance)
+ return Response(status=status.HTTP_204_NO_CONTENT)
+
+ def perform_destroy(self, instance):
+ organization = self.request.auth.organization
+ user = self.request.user
+ old_state = user.repr_settings_for_client_side_logging
+ instance.delete()
+ new_state = user.repr_settings_for_client_side_logging
+ description = f"User settings for user {user.username} was changed from:\n{old_state}\nto:\n{new_state}"
+ create_organization_log(
+ organization,
+ user,
+ OrganizationLogType.TYPE_USER_SETTINGS_CHANGED,
+ description,
+ )
+
+ def perform_create(self, serializer):
+ organization = self.request.auth.organization
+ author = self.request.user
+ user = serializer.validated_data["user"]
+
+ old_state = user.repr_settings_for_client_side_logging
+ serializer.save()
+ new_state = user.repr_settings_for_client_side_logging
+ description = f"User settings for user {user.username} was changed from:\n{old_state}\nto:\n{new_state}"
+ create_organization_log(
+ organization,
+ author,
+ OrganizationLogType.TYPE_USER_SETTINGS_CHANGED,
+ description,
+ )
+
+ def perform_update(self, serializer):
+ organization = self.request.auth.organization
+ user = self.request.user
+ old_state = user.repr_settings_for_client_side_logging
+ serializer.save()
+ new_state = user.repr_settings_for_client_side_logging
+ description = f"User settings for user {user.username} was changed from:\n{old_state}\nto:\n{new_state}"
+ create_organization_log(
+ organization,
+ user,
+ OrganizationLogType.TYPE_USER_SETTINGS_CHANGED,
+ description,
+ )
diff --git a/engine/apps/public_api/views/resolution_notes.py b/engine/apps/public_api/views/resolution_notes.py
new file mode 100644
index 0000000000..16e3fa41ca
--- /dev/null
+++ b/engine/apps/public_api/views/resolution_notes.py
@@ -0,0 +1,67 @@
+from django_filters.rest_framework import DjangoFilterBackend
+from rest_framework.exceptions import NotFound
+from rest_framework.permissions import IsAuthenticated
+from rest_framework.viewsets import ModelViewSet
+
+from apps.alerts.models import ResolutionNote
+from apps.alerts.tasks import send_update_resolution_note_signal
+from apps.auth_token.auth import ApiTokenAuthentication
+from apps.public_api import constants as public_api_constants
+from apps.public_api.serializers.resolution_notes import ResolutionNoteSerializer, ResolutionNoteUpdateSerializer
+from apps.public_api.throttlers.user_throttle import UserThrottle
+from common.api_helpers.mixins import DemoTokenMixin, RateLimitHeadersMixin, UpdateSerializerMixin
+from common.api_helpers.paginators import FiftyPageSizePaginator
+
+
+class ResolutionNoteView(RateLimitHeadersMixin, DemoTokenMixin, UpdateSerializerMixin, ModelViewSet):
+ authentication_classes = (ApiTokenAuthentication,)
+ permission_classes = (IsAuthenticated,)
+
+ throttle_classes = [UserThrottle]
+
+ model = ResolutionNote
+ serializer_class = ResolutionNoteSerializer
+ update_serializer_class = ResolutionNoteUpdateSerializer
+
+ filter_backends = [DjangoFilterBackend]
+ filterset_fields = ["alert_group"]
+
+ pagination_class = FiftyPageSizePaginator
+
+ demo_default_id = public_api_constants.DEMO_RESOLUTION_NOTE_ID
+
+ def get_queryset(self):
+ alert_group_id = self.request.query_params.get("alert_group_id", None)
+ queryset = ResolutionNote.objects.filter(
+ alert_group__channel__organization=self.request.auth.organization,
+ )
+ queryset = self.serializer_class.setup_eager_loading(queryset)
+ if alert_group_id:
+ queryset = queryset.filter(alert_group__public_primary_key=alert_group_id)
+ return queryset.order_by("alert_group__started_at")
+
+ def get_object(self):
+ public_primary_key = self.kwargs["pk"]
+ queryset = self.filter_queryset(self.get_queryset())
+ try:
+ return queryset.get(public_primary_key=public_primary_key)
+ except ResolutionNote.DoesNotExist:
+ raise NotFound
+
+ def dispatch(self, request, *args, **kwargs):
+ result = super().dispatch(request, *args, **kwargs)
+
+ # send signal to update alert group and resolution_note
+ method = request.method.lower()
+ if method in ["post", "put", "patch", "delete"]:
+ instance_id = self.kwargs.get("pk") or result.data.get("id")
+ if instance_id:
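+                # objects_with_deleted is used so the signal also fires for a note that was just deleted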
+ instance = ResolutionNote.objects_with_deleted.filter(public_primary_key=instance_id).first()
+ if instance is not None:
+ send_update_resolution_note_signal.apply_async(
+ kwargs={
+ "alert_group_pk": instance.alert_group.pk,
+ "resolution_note_pk": instance.pk,
+ }
+ )
+ return result
diff --git a/engine/apps/public_api/views/routes.py b/engine/apps/public_api/views/routes.py
new file mode 100644
index 0000000000..a353a962c6
--- /dev/null
+++ b/engine/apps/public_api/views/routes.py
@@ -0,0 +1,105 @@
+from django_filters.rest_framework import DjangoFilterBackend
+from rest_framework import status
+from rest_framework.exceptions import NotFound
+from rest_framework.permissions import IsAuthenticated
+from rest_framework.response import Response
+from rest_framework.viewsets import ModelViewSet
+
+from apps.alerts.models import ChannelFilter
+from apps.auth_token.auth import ApiTokenAuthentication
+from apps.public_api import constants as public_api_constants
+from apps.public_api.serializers import ChannelFilterSerializer, ChannelFilterUpdateSerializer
+from apps.public_api.throttlers.user_throttle import UserThrottle
+from apps.user_management.organization_log_creator import OrganizationLogType, create_organization_log
+from common.api_helpers.exceptions import BadRequest
+from common.api_helpers.mixins import DemoTokenMixin, RateLimitHeadersMixin, UpdateSerializerMixin
+from common.api_helpers.paginators import TwentyFivePageSizePaginator
+
+
+class ChannelFilterView(RateLimitHeadersMixin, DemoTokenMixin, UpdateSerializerMixin, ModelViewSet):
+ authentication_classes = (ApiTokenAuthentication,)
+ permission_classes = (IsAuthenticated,)
+
+ throttle_classes = [UserThrottle]
+
+ model = ChannelFilter
+ serializer_class = ChannelFilterSerializer
+ update_serializer_class = ChannelFilterUpdateSerializer
+
+ pagination_class = TwentyFivePageSizePaginator
+
+ filter_backends = [DjangoFilterBackend]
+ filterset_fields = ["alert_receive_channel"]
+
+ demo_default_id = public_api_constants.DEMO_ROUTE_ID_1
+
+ def get_queryset(self):
+ integration_id = self.request.query_params.get("integration_id", None)
+ routing_regex = self.request.query_params.get("routing_regex", None)
+
+ queryset = ChannelFilter.objects.filter(
+ alert_receive_channel__organization=self.request.auth.organization, alert_receive_channel__deleted_at=None
+ )
+
+ if integration_id:
+ queryset = queryset.filter(alert_receive_channel__public_primary_key=integration_id)
+ if routing_regex:
+ queryset = queryset.filter(filtering_term=routing_regex)
+ return queryset
+
+ def get_object(self):
+ public_primary_key = self.kwargs["pk"]
+
+ try:
+ return ChannelFilter.objects.filter(
+ alert_receive_channel__organization=self.request.auth.organization,
+ alert_receive_channel__deleted_at=None,
+ ).get(public_primary_key=public_primary_key)
+ except ChannelFilter.DoesNotExist:
+ raise NotFound
+
+ def destroy(self, request, *args, **kwargs):
+ instance = self.get_object()
+ if instance.is_default:
+ raise BadRequest(detail="Unable to delete default filter")
+ else:
+ alert_receive_channel = instance.alert_receive_channel
+ user = self.request.user
+ route_verbal = instance.verbal_name_for_clients.capitalize()
+ description = f"{route_verbal} of integration {alert_receive_channel.verbal_name} was deleted"
+ create_organization_log(
+ alert_receive_channel.organization,
+ user,
+ OrganizationLogType.TYPE_CHANNEL_FILTER_DELETED,
+ description,
+ )
+ self.perform_destroy(instance)
+ return Response(status=status.HTTP_204_NO_CONTENT)
+
+ def perform_create(self, serializer):
+ serializer.save()
+ instance = serializer.instance
+ alert_receive_channel = instance.alert_receive_channel
+ user = self.request.user
+ route_verbal = instance.verbal_name_for_clients.capitalize()
+ description = f"{route_verbal} was created for integration {alert_receive_channel.verbal_name}"
+ create_organization_log(
+ alert_receive_channel.organization,
+ user,
+ OrganizationLogType.TYPE_CHANNEL_FILTER_CREATED,
+ description,
+ )
+
+ def perform_update(self, serializer):
+ organization = self.request.auth.organization
+ user = self.request.user
+ old_state = serializer.instance.repr_settings_for_client_side_logging
+ serializer.save()
+ new_state = serializer.instance.repr_settings_for_client_side_logging
+ alert_receive_channel = serializer.instance.alert_receive_channel
+ route_verbal = serializer.instance.verbal_name_for_clients.capitalize()
+ description = (
+ f"Settings for {route_verbal} of integration {alert_receive_channel.verbal_name} "
+ f"was changed from:\n{old_state}\nto:\n{new_state}"
+ )
+ create_organization_log(organization, user, OrganizationLogType.TYPE_CHANNEL_FILTER_CHANGED, description)
diff --git a/engine/apps/public_api/views/schedules.py b/engine/apps/public_api/views/schedules.py
new file mode 100644
index 0000000000..16f6a17a22
--- /dev/null
+++ b/engine/apps/public_api/views/schedules.py
@@ -0,0 +1,111 @@
+from django_filters import rest_framework as filters
+from rest_framework import status
+from rest_framework.decorators import action
+from rest_framework.exceptions import NotFound
+from rest_framework.permissions import IsAuthenticated
+from rest_framework.views import Response
+from rest_framework.viewsets import ModelViewSet
+
+from apps.auth_token.auth import ApiTokenAuthentication, ScheduleExportAuthentication
+from apps.public_api import constants as public_api_constants
+from apps.public_api.custom_renderers import CalendarRenderer
+from apps.public_api.serializers import PolymorphicScheduleSerializer, PolymorphicScheduleUpdateSerializer
+from apps.public_api.throttlers.user_throttle import UserThrottle
+from apps.schedules.ical_utils import ical_export_from_schedule
+from apps.schedules.models import OnCallSchedule
+from apps.slack.tasks import update_slack_user_group_for_schedules
+from apps.user_management.organization_log_creator import OrganizationLogType, create_organization_log
+from common.api_helpers.filters import ByTeamFilter
+from common.api_helpers.mixins import DemoTokenMixin, RateLimitHeadersMixin, UpdateSerializerMixin
+from common.api_helpers.paginators import FiftyPageSizePaginator
+
+
+class OnCallScheduleChannelView(RateLimitHeadersMixin, DemoTokenMixin, UpdateSerializerMixin, ModelViewSet):
+ authentication_classes = (ApiTokenAuthentication,)
+ permission_classes = (IsAuthenticated,)
+
+ throttle_classes = [UserThrottle]
+
+ model = OnCallSchedule
+ serializer_class = PolymorphicScheduleSerializer
+ update_serializer_class = PolymorphicScheduleUpdateSerializer
+
+ pagination_class = FiftyPageSizePaginator
+
+ demo_default_id = public_api_constants.DEMO_SCHEDULE_ID_ICAL
+
+ filter_backends = (filters.DjangoFilterBackend,)
+ filterset_class = ByTeamFilter
+
+ def get_queryset(self):
+ name = self.request.query_params.get("name", None)
+
+ queryset = OnCallSchedule.objects.filter(organization=self.request.auth.organization)
+
+ if name is not None:
+ queryset = queryset.filter(name=name)
+
+ return queryset.order_by("id")
+
+ def get_object(self):
+ public_primary_key = self.kwargs["pk"]
+
+ try:
+ return OnCallSchedule.objects.filter(
+ organization=self.request.auth.organization,
+ ).get(public_primary_key=public_primary_key)
+ except OnCallSchedule.DoesNotExist:
+ raise NotFound
+
+ def perform_create(self, serializer):
+ serializer.save()
+ instance = serializer.instance
+
+ if instance.user_group is not None:
+ update_slack_user_group_for_schedules.apply_async((instance.user_group.pk,))
+
+ organization = self.request.auth.organization
+ user = self.request.user
+ description = f"Schedule {instance.name} was created"
+ create_organization_log(organization, user, OrganizationLogType.TYPE_SCHEDULE_CREATED, description)
+
+ def perform_update(self, serializer):
+ organization = self.request.auth.organization
+ user = self.request.user
+ old_state = serializer.instance.repr_settings_for_client_side_logging
+ old_user_group = serializer.instance.user_group
+
+ updated_schedule = serializer.save()
+
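+        # keep Slack user groups in sync: refresh the old group, and the new one if it changed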
+ if old_user_group is not None:
+ update_slack_user_group_for_schedules.apply_async((old_user_group.pk,))
+
+ if updated_schedule.user_group is not None and updated_schedule.user_group != old_user_group:
+ update_slack_user_group_for_schedules.apply_async((updated_schedule.user_group.pk,))
+
+ new_state = serializer.instance.repr_settings_for_client_side_logging
+ description = f"Schedule {serializer.instance.name} was changed from:\n{old_state}\nto:\n{new_state}"
+ create_organization_log(organization, user, OrganizationLogType.TYPE_SCHEDULE_CHANGED, description)
+
+ def perform_destroy(self, instance):
+ organization = self.request.auth.organization
+ user = self.request.user
+ description = f"Schedule {instance.name} was deleted"
+ create_organization_log(organization, user, OrganizationLogType.TYPE_SCHEDULE_DELETED, description)
+
+ instance.delete()
+
+ if instance.user_group is not None:
+ update_slack_user_group_for_schedules.apply_async((instance.user_group.pk,))
+
+ @action(
+ methods=["get"],
+ detail=True,
+ renderer_classes=(CalendarRenderer,),
+ authentication_classes=(ScheduleExportAuthentication,),
+ permission_classes=(IsAuthenticated,),
+ )
+ def export(self, request, pk):
+        # Not using the existing get_object method because it requires access to the organization user attribute
+ export = ical_export_from_schedule(self.request.auth.schedule)
+ return Response(export, status=status.HTTP_200_OK)
diff --git a/engine/apps/public_api/views/slack_channels.py b/engine/apps/public_api/views/slack_channels.py
new file mode 100644
index 0000000000..f261f0b65d
--- /dev/null
+++ b/engine/apps/public_api/views/slack_channels.py
@@ -0,0 +1,32 @@
+from rest_framework import mixins
+from rest_framework.permissions import IsAuthenticated
+from rest_framework.viewsets import GenericViewSet
+
+from apps.auth_token.auth import ApiTokenAuthentication
+from apps.public_api.serializers.slack_channel import SlackChannelSerializer
+from apps.public_api.throttlers.user_throttle import UserThrottle
+from apps.slack.models import SlackChannel
+from common.api_helpers.mixins import DemoTokenMixin, RateLimitHeadersMixin
+from common.api_helpers.paginators import FiftyPageSizePaginator
+
+
+class SlackChannelView(RateLimitHeadersMixin, DemoTokenMixin, mixins.ListModelMixin, GenericViewSet):
+ authentication_classes = (ApiTokenAuthentication,)
+ permission_classes = (IsAuthenticated,)
+ pagination_class = FiftyPageSizePaginator
+ throttle_classes = [UserThrottle]
+
+ model = SlackChannel
+ serializer_class = SlackChannelSerializer
+
+ def get_queryset(self):
+ channel_name = self.request.query_params.get("channel_name", None)
+
+ queryset = SlackChannel.objects.filter(
+ slack_team_identity__organizations=self.request.auth.organization
+ ).distinct()
+
+ if channel_name:
+ queryset = queryset.filter(name=channel_name)
+
+ return queryset
diff --git a/engine/apps/public_api/views/teams.py b/engine/apps/public_api/views/teams.py
new file mode 100644
index 0000000000..c42e04606b
--- /dev/null
+++ b/engine/apps/public_api/views/teams.py
@@ -0,0 +1,27 @@
+from rest_framework import viewsets
+from rest_framework.mixins import ListModelMixin, RetrieveModelMixin
+from rest_framework.permissions import IsAuthenticated
+
+from apps.auth_token.auth import ApiTokenAuthentication
+from apps.public_api.serializers.teams import TeamSerializer
+from apps.public_api.throttlers.user_throttle import UserThrottle
+from apps.user_management.models import Team
+from common.api_helpers.mixins import PublicPrimaryKeyMixin
+from common.api_helpers.paginators import FiftyPageSizePaginator
+
+
+class TeamView(PublicPrimaryKeyMixin, RetrieveModelMixin, ListModelMixin, viewsets.GenericViewSet):
+ serializer_class = TeamSerializer
+ authentication_classes = (ApiTokenAuthentication,)
+ permission_classes = (IsAuthenticated,)
+
+ model = Team
+ pagination_class = FiftyPageSizePaginator
+ throttle_classes = [UserThrottle]
+
+ def get_queryset(self):
+ name = self.request.query_params.get("name", None)
+ queryset = self.request.auth.organization.teams.all()
+ if name:
+ queryset = queryset.filter(name=name)
+ return queryset
diff --git a/engine/apps/public_api/views/user_groups.py b/engine/apps/public_api/views/user_groups.py
new file mode 100644
index 0000000000..4e6bbaf3ad
--- /dev/null
+++ b/engine/apps/public_api/views/user_groups.py
@@ -0,0 +1,29 @@
+from rest_framework import mixins
+from rest_framework.permissions import IsAuthenticated
+from rest_framework.viewsets import GenericViewSet
+
+from apps.auth_token.auth import ApiTokenAuthentication
+from apps.public_api.serializers.user_groups import UserGroupSerializer
+from apps.public_api.throttlers.user_throttle import UserThrottle
+from apps.slack.models import SlackUserGroup
+from common.api_helpers.mixins import DemoTokenMixin, RateLimitHeadersMixin
+from common.api_helpers.paginators import FiftyPageSizePaginator
+
+
+class UserGroupView(RateLimitHeadersMixin, DemoTokenMixin, mixins.ListModelMixin, GenericViewSet):
+ authentication_classes = (ApiTokenAuthentication,)
+ permission_classes = (IsAuthenticated,)
+ pagination_class = FiftyPageSizePaginator
+ throttle_classes = [UserThrottle]
+
+ model = SlackUserGroup
+ serializer_class = UserGroupSerializer
+
+ def get_queryset(self):
+ slack_handle = self.request.query_params.get("slack_handle", None)
+ queryset = SlackUserGroup.objects.filter(
+ slack_team_identity__organizations=self.request.auth.organization
+ ).distinct()
+ if slack_handle:
+ queryset = queryset.filter(handle=slack_handle)
+ return queryset
diff --git a/engine/apps/public_api/views/users.py b/engine/apps/public_api/views/users.py
new file mode 100644
index 0000000000..815c6553f5
--- /dev/null
+++ b/engine/apps/public_api/views/users.py
@@ -0,0 +1,71 @@
+from rest_framework.decorators import action
+from rest_framework.exceptions import NotFound
+from rest_framework.permissions import IsAuthenticated
+from rest_framework.views import Response
+from rest_framework.viewsets import ReadOnlyModelViewSet
+
+from apps.auth_token.auth import ApiTokenAuthentication, UserScheduleExportAuthentication
+from apps.public_api import constants as public_api_constants
+from apps.public_api.custom_renderers import CalendarRenderer
+from apps.public_api.serializers import FastUserSerializer, UserSerializer
+from apps.public_api.throttlers.user_throttle import UserThrottle
+from apps.schedules.ical_utils import user_ical_export
+from apps.schedules.models import OnCallSchedule
+from apps.user_management.models import User
+from common.api_helpers.mixins import DemoTokenMixin, RateLimitHeadersMixin, ShortSerializerMixin
+from common.api_helpers.paginators import HundredPageSizePaginator
+from common.constants.role import Role
+
+
+class UserView(RateLimitHeadersMixin, ShortSerializerMixin, DemoTokenMixin, ReadOnlyModelViewSet):
+ authentication_classes = (ApiTokenAuthentication,)
+ permission_classes = (IsAuthenticated,)
+
+ model = User
+ pagination_class = HundredPageSizePaginator
+
+ serializer_class = UserSerializer
+ short_serializer_class = FastUserSerializer
+
+ throttle_classes = [UserThrottle]
+
+ demo_default_id = public_api_constants.DEMO_USER_ID
+
+ def get_queryset(self):
+ username = self.request.query_params.get("username")
+ is_short_request = self.request.query_params.get("short", "false") == "true"
+ queryset = self.request.auth.organization.users.filter(role__in=[Role.ADMIN, Role.EDITOR]).distinct()
+
+ if username is not None:
+ queryset = queryset.filter(username=username)
+
+ if not is_short_request:
+ queryset = self.serializer_class.setup_eager_loading(queryset)
+ return queryset.order_by("id")
+
+ def get_object(self):
+ public_primary_key = self.kwargs["pk"]
+
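+        # "current" resolves to the user who owns the API token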
+ if public_primary_key == "current":
+ return self.request.user
+
+ organization = self.request.auth.organization
+
+ try:
+ user = User.objects.get(public_primary_key=public_primary_key, organization=organization)
+ except User.DoesNotExist:
+ raise NotFound
+
+ return user
+
+ @action(
+ methods=["get"],
+ detail=True,
+ renderer_classes=(CalendarRenderer,),
+ authentication_classes=(UserScheduleExportAuthentication,),
+ permission_classes=(IsAuthenticated,),
+ )
+ def schedule_export(self, request, pk):
+ schedules = OnCallSchedule.objects.filter(organization=self.request.auth.organization)
+ export = user_ical_export(self.request.user, schedules)
+ return Response(export)
diff --git a/engine/apps/schedules/__init__.py b/engine/apps/schedules/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/engine/apps/schedules/admin.py b/engine/apps/schedules/admin.py
new file mode 100644
index 0000000000..545d0b8cd6
--- /dev/null
+++ b/engine/apps/schedules/admin.py
@@ -0,0 +1,9 @@
+from django.contrib import admin
+
+from apps.schedules.models import OnCallSchedule
+from common.admin import CustomModelAdmin
+
+
+@admin.register(OnCallSchedule)
+class OnCallScheduleAdmin(CustomModelAdmin):
+ list_display = ("id", "public_primary_key", "name")
diff --git a/engine/apps/schedules/ical_events/__init__.py b/engine/apps/schedules/ical_events/__init__.py
new file mode 100644
index 0000000000..79d4a802ce
--- /dev/null
+++ b/engine/apps/schedules/ical_events/__init__.py
@@ -0,0 +1,5 @@
+from apps.schedules.ical_events.adapter.amixr_recurring_ical_events_adapter import AmixrRecurringIcalEventsAdapter
+from apps.schedules.ical_events.proxy.ical_proxy import IcalProxy
+
+adapter = AmixrRecurringIcalEventsAdapter()
+ical_events = IcalProxy(adapter)
diff --git a/engine/apps/schedules/ical_events/adapter/__init__.py b/engine/apps/schedules/ical_events/adapter/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/engine/apps/schedules/ical_events/adapter/amixr_recurring_ical_events_adapter.py b/engine/apps/schedules/ical_events/adapter/amixr_recurring_ical_events_adapter.py
new file mode 100644
index 0000000000..4d17f49701
--- /dev/null
+++ b/engine/apps/schedules/ical_events/adapter/amixr_recurring_ical_events_adapter.py
@@ -0,0 +1,88 @@
+from collections import defaultdict
+from datetime import datetime
+from typing import List
+
+from django.utils import timezone
+from icalendar import Calendar, Event
+from recurring_ical_events import UnfoldableCalendar, compare_greater, is_event, time_span_contains_event
+
+from apps.schedules.ical_events.proxy.ical_proxy import IcalService
+
+EXTRA_LOOKUP_DAYS = 16
+
+
+class AmixrUnfoldableCalendar(UnfoldableCalendar):
+ """
+    An overridden recurring_ical_events.UnfoldableCalendar.
+    It is overridden to work around a bug where the summary of a recurring event stayed the same after editing.
+    recurring-ical-events==0.1.20b0 fixes this problem, but there all-day events without a timezone raise an
+    exception, so the relevant code was copied from 0.1.20b0 while 0.1.16b stays in requirements.
+ """
+
+ def between(self, start, stop):
+ """Return events at a time between start (inclusive) and end (inclusive)"""
+ span_start = self.to_datetime(start)
+ span_stop = self.to_datetime(stop)
+ events = []
+ events_by_id = defaultdict(dict) # UID (str) : RECURRENCE-ID(datetime) : event (Event)
+ default_uid = object()
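+        # default_uid is a sentinel key for events that have no UID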
+
+ def add_event(event):
+ """Add an event and check if it was edited."""
+ same_events = events_by_id[event.get("UID", default_uid)]
+ recurrence_id = event.get("RECURRENCE-ID", event["DTSTART"]).dt
+ # Start of code from 0.1.20b0
+ if isinstance(recurrence_id, datetime):
+ recurrence_id = recurrence_id.date()
+ other = same_events.get(recurrence_id, None)
+ if other:
+ event_recurrence_id = event.get("RECURRENCE-ID", None)
+ other_recurrence_id = other.get("RECURRENCE-ID", None)
+ if event_recurrence_id is not None and other_recurrence_id is None:
+ events.remove(other)
+ elif event_recurrence_id is None and other_recurrence_id is not None:
+ return
+ else:
+ event_sequence = event.get("SEQUENCE", None)
+ other_sequence = other.get("SEQUENCE", None)
+ if event_sequence is not None and other_sequence is not None:
+ if event["SEQUENCE"] < other["SEQUENCE"]:
+ return
+ events.remove(other)
+ # End of code from 0.1.20b0
+ same_events[recurrence_id] = event
+ events.append(event)
+
+ for event in self.calendar.walk():
+ if not is_event(event):
+ continue
+ repetitions = self.RepeatedEvent(event, span_start)
+ for repetition in repetitions:
+ if compare_greater(repetition.start, span_stop):
+ break
+ if repetition.is_in_span(span_start, span_stop):
+ add_event(repetition.as_vevent())
+ return events
+
+
+class AmixrRecurringIcalEventsAdapter(IcalService):
+ def get_events_from_ical_between(self, calendar: Calendar, start_date: datetime, end_date: datetime) -> List[Event]:
+ """
+        EXTRA_LOOKUP_DAYS was introduced to fix a bug where swapping two recurring events with each other led to
+        duplicates whenever end_date - start_date was shorter than the recurring event's duration.
+        This happens because such a swap creates a new ical event with the same RECURRENCE-ID as the original
+        and a greater SEQUENCE param. If one of these events falls outside the lookup window, we cannot take
+        the SEQUENCE values into account and keep only the event with the higher value.
+        The solution is to look up EXTRA_LOOKUP_DAYS forward and back, and then make one more pass over the
+        resulting events to filter out those that are not between start_date and end_date.
+        The value of EXTRA_LOOKUP_DAYS is empirical.
+ """
+ events = AmixrUnfoldableCalendar(calendar).between(
+ start_date - timezone.timedelta(days=EXTRA_LOOKUP_DAYS),
+ end_date + timezone.timedelta(days=EXTRA_LOOKUP_DAYS),
+ )
+
+ def filter_extra_days(event):
+ return time_span_contains_event(start_date, end_date, event["DTSTART"].dt, event["DTEND"].dt)
+
+ return list(filter(filter_extra_days, events))
diff --git a/engine/apps/schedules/ical_events/adapter/recurring_ical_events_adapter.py b/engine/apps/schedules/ical_events/adapter/recurring_ical_events_adapter.py
new file mode 100644
index 0000000000..ee17d51abf
--- /dev/null
+++ b/engine/apps/schedules/ical_events/adapter/recurring_ical_events_adapter.py
@@ -0,0 +1,17 @@
+from datetime import datetime
+from typing import List
+
+import recurring_ical_events
+from icalendar import Calendar, Event
+
+from apps.schedules.ical_events.proxy.ical_proxy import IcalService
+
+
+class RecurringIcalEventsAdapter(IcalService):
+ """
+    Adapter for the plain recurring_ical_events library, as it was used before the iCal adapters were introduced.
+ Not recommended for use.
+ """
+
+ def get_events_from_ical_between(self, calendar: Calendar, start_date: datetime, end_date: datetime) -> List[Event]:
+ return recurring_ical_events.of(calendar).between(start_date, end_date)
diff --git a/engine/apps/schedules/ical_events/proxy/__init__.py b/engine/apps/schedules/ical_events/proxy/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/engine/apps/schedules/ical_events/proxy/ical_proxy.py b/engine/apps/schedules/ical_events/proxy/ical_proxy.py
new file mode 100644
index 0000000000..a569c90550
--- /dev/null
+++ b/engine/apps/schedules/ical_events/proxy/ical_proxy.py
@@ -0,0 +1,19 @@
+from abc import ABC, abstractmethod
+from datetime import datetime
+from typing import List
+
+from icalendar import Calendar, Event
+
+
+class IcalService(ABC):
+ @abstractmethod
+ def get_events_from_ical_between(self, calendar: Calendar, start_date: datetime, end_date: datetime) -> List[Event]:
+ raise NotImplementedError
+
+
+class IcalProxy(IcalService):
+ def __init__(self, ical_adapter: IcalService):
+ self.ical_adapter = ical_adapter
+
+ def get_events_from_ical_between(self, calendar: Calendar, start_date: datetime, end_date: datetime) -> List[Event]:
+ return self.ical_adapter.get_events_from_ical_between(calendar, start_date, end_date)
diff --git a/engine/apps/schedules/ical_utils.py b/engine/apps/schedules/ical_utils.py
new file mode 100644
index 0000000000..309729cdbd
--- /dev/null
+++ b/engine/apps/schedules/ical_utils.py
@@ -0,0 +1,696 @@
+from __future__ import annotations
+
+import datetime
+import logging
+import re
+from collections import namedtuple
+from typing import TYPE_CHECKING
+
+import pytz
+import requests
+from django.apps import apps
+from django.db.models import Q
+from django.utils import timezone
+from icalendar import Calendar
+
+from apps.schedules.ical_events import ical_events
+from common.constants.role import Role
+from common.utils import timed_lru_cache
+
+"""
+This is a hack to allow us to load models for type checking without circular dependencies.
+This module likely needs to be refactored to be part of the OnCallSchedule module.
+"""
+if TYPE_CHECKING:
+ from apps.schedules.models import OnCallSchedule
+ from apps.user_management.models import User
+
+
+def users_in_ical(usernames_from_ical, organization):
+ """
+ Parse ical file and return list of users found
+ """
+    # Only the Grafana username and email are matched; consider also adding the Grafana id
+
+ users_found_in_ical = organization.users.filter(
+ Q(role__in=(Role.ADMIN, Role.EDITOR)) & (Q(username__in=usernames_from_ical) | Q(email__in=usernames_from_ical))
+ ).distinct()
+
+    # Here is an example of how we extracted users previously, also using Slack fields
+ # user_roles_found_in_ical = team.org_user_role.filter(role__in=[ROLE_ADMIN, ROLE_USER]).filter(
+ # Q(
+ # Q(amixr_user__slack_user_identities__slack_team_identity__amixr_team=team) &
+ # Q(
+ # Q(amixr_user__slack_user_identities__profile_display_name__in=usernames_from_ical) |
+ # Q(amixr_user__slack_user_identities__cached_name__in=usernames_from_ical) |
+ # Q(amixr_user__slack_user_identities__slack_id__in=[username.split(" ")[0] for username in
+ # usernames_from_ical]) |
+ # Q(amixr_user__slack_user_identities__cached_slack_login__in=usernames_from_ical) |
+ # Q(amixr_user__slack_user_identities__profile_real_name__in=usernames_from_ical)
+ # )
+ # )
+ # |
+ # Q(username__in=usernames_from_ical)
+ # ).annotate(is_deleted_sui=Subquery(slack_user_identity_subquery.values("deleted")[:1])).filter(
+ # ~Q(is_deleted_sui=True) | Q(is_deleted_sui__isnull=True)).distinct()
+ # return user_roles_found_in_ical
+
+ return users_found_in_ical
+
+
+@timed_lru_cache(timeout=100)
+def memoized_users_in_ical(usernames_from_ical, organization):
+ # using in-memory cache instead of redis to avoid pickling python objects
+ return users_in_ical(usernames_from_ical, organization)
+
+
+ICAL_DATETIME_START = "DTSTART"
+ICAL_DATETIME_END = "DTEND"
+ICAL_SUMMARY = "SUMMARY"
+ICAL_DESCRIPTION = "DESCRIPTION"
+ICAL_ATTENDEE = "ATTENDEE"
+ICAL_UID = "UID"
+RE_PRIORITY = re.compile(r"^\[L(\d)\]")
+logger = logging.getLogger(__name__)
+logger.setLevel(logging.DEBUG)
+
+
+# used to display schedule events on the web
+def list_of_oncall_shifts_from_ical(schedule, date, user_timezone="UTC", with_empty_shifts=False, with_gaps=False):
+ """
+ Parse the ical file and return list of events with users
+ This function is used in serializer for api schedules/events/ endpoint
+ Example output:
+ [
+ {
+ "start": datetime.datetime(2021, 7, 8, 5, 30, tzinfo=,
+ "end": datetime.datetime(2021, 7, 8, 13, 15, tzinfo=),
+ "users": ]>,
+ "priority": 0,
+ "source": None,
+ "calendar_type": 0
+ }
+ ]
+ """
+ OnCallSchedule = apps.get_model("schedules", "OnCallSchedule")
+
+ # get list of iCalendars from current iCal files. If there is more than one calendar, primary calendar will always
+ # be the first
+ calendars = schedule.get_icalendars()
+
+ # TODO: Review offset usage
+ user_timezone_offset = timezone.datetime.now().astimezone(pytz.timezone(user_timezone)).utcoffset()
+ datetime_min = timezone.datetime.combine(date, datetime.time.min) + timezone.timedelta(milliseconds=1)
+ datetime_start = (datetime_min - user_timezone_offset).astimezone(pytz.UTC)
+ datetime_end = datetime_start + timezone.timedelta(hours=23, minutes=59, seconds=59)
+
+ result_datetime = []
+ result_date = []
+
+ for idx, calendar in enumerate(calendars):
+ if calendar is not None:
+ if idx == 0:
+ calendar_type = OnCallSchedule.PRIMARY
+ else:
+ calendar_type = OnCallSchedule.OVERRIDES
+
+ tmp_result_datetime, tmp_result_date = get_shifts_dict(
+ calendar, calendar_type, schedule, datetime_start, datetime_end, date, with_empty_shifts
+ )
+ result_datetime.extend(tmp_result_datetime)
+ result_date.extend(tmp_result_date)
+
+ if with_gaps and len(result_date) == 0:
+ as_intervals = [DatetimeInterval(shift["start"], shift["end"]) for shift in result_datetime]
+ gaps = detect_gaps(as_intervals, datetime_start, datetime_end)
+ for g in gaps:
+ result_datetime.append(
+ {
+ "start": g.start if g.start else datetime_start,
+ "end": g.end if g.end else datetime_end,
+ "users": [],
+ "priority": None,
+ "source": None,
+ "calendar_type": None,
+ "is_gap": True,
+ }
+ )
+ result = sorted(result_datetime, key=lambda dt: dt["start"]) + result_date
+    # if there are no events, return None
+ return result or None
+
+
+def get_shifts_dict(calendar, calendar_type, schedule, datetime_start, datetime_end, date, with_empty_shifts=False):
+ events = ical_events.get_events_from_ical_between(calendar, datetime_start, datetime_end)
+ result_datetime = []
+ result_date = []
+ for event in events:
+ priority = parse_priority_from_string(event.get(ICAL_SUMMARY, "[L0]"))
+ source = parse_source_from_string(event.get(ICAL_UID))
+ users = get_users_from_ical_event(event, schedule.organization)
+        # Build an on-call shift from an ical event that has an actual user
+ if len(users) > 0 or with_empty_shifts:
+ if type(event[ICAL_DATETIME_START].dt) == datetime.date:
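+                # all-day events carry plain dates rather than datetimes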
+ start = event[ICAL_DATETIME_START].dt
+ end = event[ICAL_DATETIME_END].dt
+ if start <= date < end:
+ result_date.append(
+ {
+ "start": start,
+ "end": end,
+ "users": users,
+ "priority": priority,
+ "source": source,
+ "calendar_type": calendar_type,
+ }
+ )
+ else:
+ start = event[ICAL_DATETIME_START].dt.astimezone(pytz.UTC)
+ end = event[ICAL_DATETIME_END].dt.astimezone(pytz.UTC)
+
+ result_datetime.append(
+ {
+ "start": start,
+ "end": end,
+ "users": users,
+ "priority": priority,
+ "source": source,
+ "calendar_type": calendar_type,
+ }
+ )
+ return result_datetime, result_date
+
+
+EmptyShift = namedtuple(
+ "EmptyShift", ["start", "end", "summary", "description", "attendee", "all_day", "calendar_type", "calendar_tz"]
+)
+
+
+def list_of_empty_shifts_in_schedule(schedule, start_date, end_date):
+ """
+ Parse the ical file and return list of EmptyShift.
+ """
+    # Calculate the lookup window in the schedule's tz.
+    # If we can't get the tz from the ical, use UTC.
+ OnCallSchedule = apps.get_model("schedules", "OnCallSchedule")
+
+ calendars = schedule.get_icalendars()
+ empty_shifts = []
+ for idx, calendar in enumerate(calendars):
+ if calendar is not None:
+ if idx == 0:
+ calendar_type = OnCallSchedule.PRIMARY
+ else:
+ calendar_type = OnCallSchedule.OVERRIDES
+
+ calendar_tz = get_icalendar_tz_or_utc(calendar)
+
+ schedule_timezone_offset = timezone.datetime.now().astimezone(calendar_tz).utcoffset()
+ start_datetime = timezone.datetime.combine(start_date, datetime.time.min) + timezone.timedelta(
+ milliseconds=1
+ )
+ start_datetime_with_offset = (start_datetime - schedule_timezone_offset).astimezone(pytz.UTC)
+ end_datetime = timezone.datetime.combine(end_date, datetime.time.max)
+ end_datetime_with_offset = (end_datetime - schedule_timezone_offset).astimezone(pytz.UTC)
+
+ events = ical_events.get_events_from_ical_between(
+ calendar, start_datetime_with_offset, end_datetime_with_offset
+ )
+
+            # Keep hashes of checked events so that only the first occurrence of a recurring event gets into the result
+ checked_events = set()
+ empty_shifts_per_calendar = []
+ for event in events:
+ users = get_users_from_ical_event(event, schedule.organization)
+ if len(users) == 0:
+ summary = event.get(ICAL_SUMMARY, "")
+ description = event.get(ICAL_DESCRIPTION, "")
+ attendee = event.get(ICAL_ATTENDEE, "")
+
+ event_hash = hash(f"{event[ICAL_UID]}{summary}{description}{attendee}")
+ if event_hash in checked_events:
+ continue
+
+ checked_events.add(event_hash)
+
+ all_day = False
+ if type(event[ICAL_DATETIME_START].dt) == datetime.date:
+ # Convert all-day events start and end from date to datetime with calendar's tz
+ start, _ = ical_date_to_datetime(event["DTSTART"].dt, calendar_tz, start=True)
+ end, _ = ical_date_to_datetime(event["DTEND"].dt, calendar_tz, start=False)
+ all_day = True
+ else:
+ start = event[ICAL_DATETIME_START].dt.astimezone(pytz.UTC)
+ end = event[ICAL_DATETIME_END].dt.astimezone(pytz.UTC)
+
+ empty_shifts_per_calendar.append(
+ EmptyShift(
+ start=start,
+ end=end,
+ summary=summary,
+ description=description,
+ attendee=attendee,
+ all_day=all_day,
+ calendar_type=calendar_type,
+ calendar_tz=calendar_tz,
+ )
+ )
+ empty_shifts.extend(empty_shifts_per_calendar)
+
+ return sorted(empty_shifts, key=lambda dt: dt.start)
+
+
+def list_users_to_notify_from_ical(schedule, events_datetime=None):
+ """
+ Retrieve on-call users for the current time
+ """
+ events_datetime = events_datetime if events_datetime else timezone.datetime.now(timezone.utc)
+ return list_users_to_notify_from_ical_for_period(schedule, events_datetime, events_datetime)
+
+
+def list_users_to_notify_from_ical_for_period(schedule, start_datetime, end_datetime):
+    # get the list of iCalendars from the current iCal files; if there is more than one calendar,
+    # the primary calendar is always first
+ calendars = schedule.get_icalendars()
+    # reverse the calendars so the overrides calendar comes first (relevant for iCal schedules)
+ calendars = calendars[::-1]
+ users_found_in_ical = []
+    # check the overrides calendar first and return its users if it exists and on-call users are found in it
+ for calendar in calendars:
+ if calendar is None:
+ continue
+ events = ical_events.get_events_from_ical_between(calendar, start_datetime, end_datetime)
+        parsed_ical_events = {}  # priority -> list of parsed usernames, e.g. {0: ["alex"]}
+ for event in events:
+ current_usernames, current_priority = get_usernames_from_ical_event(event)
+ parsed_ical_events.setdefault(current_priority, []).extend(current_usernames)
+        # resolve usernames to users, starting with the highest priority; if none are found, fall back to lower ones
+ for _, usernames in sorted(parsed_ical_events.items(), reverse=True):
+ users_found_in_ical = users_in_ical(usernames, schedule.organization)
+ if users_found_in_ical:
+ break
+ if users_found_in_ical:
+ # if users are found in the overrides calendar, there is no need to check primary calendar
+ break
+ return users_found_in_ical
+
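+# Illustrative sketch (not part of the original module) of the priority fallback above.
+# Assuming events parsed into parsed_ical_events = {1: ["alice"], 0: ["bob"]}:
+# priorities are checked highest-first, so "alice" is tried first; if she can't be
+# resolved to a user in the organization, the lookup falls back to priority 0 ("bob").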
+
+def parse_username_from_string(string):
+ """
+ Parse on-call shift user from the given string
+ Example input:
+ [L1] bob@company.com
+ Example output:
+ bob@company.com
+ """
+    return re.sub(RE_PRIORITY, "", string.strip(), count=1).strip()
+
+
+def parse_priority_from_string(string):
+ """
+ Parse on-call shift priority from the given string
+ Example input:
+ [L1] @alex @bob
+ Example output:
+ 1
+ """
+ priority = re.findall(RE_PRIORITY, string.strip())
+ if len(priority) > 0:
+ priority = int(priority[0])
+ if priority < 1:
+ priority = 0
+ else:
+ priority = 0
+ return priority
+
+
+def parse_source_from_string(string):
+ CustomOnCallShift = apps.get_model("schedules", "CustomOnCallShift")
+ split_string = string.split("-")
+ source_verbal = None
+ if len(split_string) >= 2 and split_string[0] == "amixr":
+ regex = re.compile(r"^S(\d)$")
+ source = re.findall(regex, split_string[-1])
+ if len(source) > 0:
+ source = int(source[0])
+ source_verbal = CustomOnCallShift.SOURCE_CHOICES[source][1]
+ return source_verbal
+
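+# For illustration: UIDs generated by CustomOnCallShift.generate_ical look like
+# "amixr-<uuid>-U<user_counter>-E<counter>-S<source>", so a UID ending in "-S1" maps to
+# CustomOnCallShift.SOURCE_CHOICES[1] and parse_source_from_string returns "api";
+# strings without the "amixr" prefix yield None.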
+
+def get_usernames_from_ical_event(event):
+ usernames_found = []
+ priority = parse_priority_from_string(event.get(ICAL_SUMMARY, "[L0]"))
+ if ICAL_SUMMARY in event:
+ usernames_found.append(parse_username_from_string(event[ICAL_SUMMARY]))
+ if ICAL_DESCRIPTION in event:
+ usernames_found.append(parse_username_from_string(event[ICAL_DESCRIPTION]))
+ if ICAL_ATTENDEE in event:
+ if type(event[ICAL_ATTENDEE]) is str:
+            # PagerDuty adds only one attendee, in which case event[ICAL_ATTENDEE] is a string.
+            # If several attendees were added to the event, event[ICAL_ATTENDEE] will be a list
+            # (e.g. several people invited in Google Calendar).
+ usernames_found.append(parse_username_from_string(event[ICAL_ATTENDEE]))
+ return usernames_found, priority
+
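+# A small example of the parsing above: for an event whose SUMMARY is
+# "[L1] bob@company.com" and which has no DESCRIPTION or ATTENDEE fields,
+# get_usernames_from_ical_event returns (["bob@company.com"], 1).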
+
+def get_users_from_ical_event(event, organization):
+ usernames_from_ical, _ = get_usernames_from_ical_event(event)
+ users = []
+ if len(usernames_from_ical) != 0:
+ users = memoized_users_in_ical(tuple(usernames_from_ical), organization)
+ return users
+
+
+def is_icals_equal(first, second):
+    # compare two iCal files line by line, ignoring DTSTAMP lines (they change on every export)
+    first = first.split("\n")
+    second = second.split("\n")
+    if len(first) != len(second):
+        return False
+    for idx, first_item in enumerate(first):
+        if first_item.startswith("DTSTAMP"):
+            continue
+        if first_item != second[idx]:
+            return False
+    return True
+
+
+def ical_date_to_datetime(date, tz, start):
+ datetime_to_combine = datetime.time.min
+ all_day = False
+    # exact type check on purpose: datetime.datetime subclasses datetime.date
+    if type(date) is datetime.date:
+ all_day = True
+ calendar_timezone_offset = timezone.datetime.now().astimezone(tz).utcoffset()
+ date = timezone.datetime.combine(date, datetime_to_combine).astimezone(tz) - calendar_timezone_offset
+ if not start:
+ date -= timezone.timedelta(seconds=1)
+ return date, all_day
+
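+# A rough sketch of the intent above (hedged, since the offset math depends on the
+# server's local time): an all-day date is promoted to that day's midnight in the
+# calendar's tz, and end dates are pulled back by one second so the exclusive iCal
+# DTEND behaves as an inclusive end of the previous day.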
+
+def calculate_shift_diff(first_shift, second_shift):
+ fields_to_compare = ["users", "end", "start", "all_day", "priority"]
+
+ shift_changed = set(first_shift.keys()) != set(second_shift.keys())
+ if not shift_changed:
+ diff = set()
+ for k, v in first_shift.items():
+ for f in fields_to_compare:
+ if v.get(f) != second_shift[k].get(f):
+ shift_changed = True
+ diff.add(k)
+ break
+ else:
+ diff = set(first_shift.keys()) - set(second_shift.keys())
+
+ return shift_changed, diff
+
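+# Illustrative input/output (hypothetical shift dicts keyed by event uid):
+#   first  = {"uid1": {"users": ["alex"], "start": s, "end": e, "all_day": False, "priority": 0}}
+#   second = {"uid1": {"users": ["bob"], "start": s, "end": e, "all_day": False, "priority": 0}}
+#   calculate_shift_diff(first, second)  # -> (True, {"uid1"}), since "users" changed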
+
+def get_icalendar_tz_or_utc(icalendar):
+ try:
+ calendar_timezone = icalendar.walk("VTIMEZONE")[0]["TZID"]
+ except (IndexError, KeyError):
+ calendar_timezone = "UTC"
+
+ try:
+ return pytz.timezone(calendar_timezone)
+ except pytz.UnknownTimeZoneError:
+ # try to convert the timezone from windows to iana
+ converted_timezone = convert_windows_timezone_to_iana(calendar_timezone)
+ if converted_timezone is None:
+ return "UTC"
+ return pytz.timezone(converted_timezone)
+
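+# For example, a calendar exported from Outlook may carry a Windows timezone id such as
+# "W. Europe Standard Time" in its VTIMEZONE; pytz raises UnknownTimeZoneError for it,
+# and the fallback maps it to "Europe/Berlin" via convert_windows_timezone_to_iana below.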
+
+def fetch_ical_file_or_get_error(ical_url):
+ cached_ical_file = None
+ ical_file_error = None
+ try:
+ new_ical_file = requests.get(ical_url, timeout=10).text
+ Calendar.from_ical(new_ical_file)
+ cached_ical_file = new_ical_file
+ except requests.exceptions.RequestException:
+ ical_file_error = "iCal download failed"
+ except ValueError:
+ ical_file_error = "wrong iCal"
+ # TODO: catch icalendar exceptions
+ return cached_ical_file, ical_file_error
+
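+# Return-value sketch: (ical_text, None) when the download and parse succeed,
+# (None, "iCal download failed") on network errors, and (None, "wrong iCal") when
+# the payload can't be parsed as an iCalendar file.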
+
+def create_base_icalendar(name: str) -> Calendar:
+ cal = Calendar()
+ cal.add("calscale", "GREGORIAN")
+ cal.add("x-wr-calname", name)
+ cal.add("x-wr-timezone", "UTC")
+ cal.add("prodid", "//Grafana Labs//Grafana On-Call//")
+
+ return cal
+
+
+def get_events_from_calendars(ical_obj: Calendar, calendars: tuple) -> None:
+ for calendar in calendars:
+ if calendar:
+ for component in calendar.walk():
+ if component.name == "VEVENT":
+ ical_obj.add_component(component)
+
+
+def get_user_events_from_calendars(ical_obj: Calendar, calendars: tuple, user: User) -> None:
+ for calendar in calendars:
+ if calendar:
+ for component in calendar.walk():
+ if component.name == "VEVENT":
+                    usernames, _ = get_usernames_from_ical_event(component)
+                    if usernames and usernames[0] in (user.username, user.email):
+ ical_obj.add_component(component)
+
+
+def ical_export_from_schedule(schedule: OnCallSchedule) -> bytes:
+ calendars = schedule.get_icalendars()
+ ical_obj = create_base_icalendar(schedule.name)
+ get_events_from_calendars(ical_obj, calendars)
+ return ical_obj.to_ical()
+
+
+def user_ical_export(user: User, schedules: list[OnCallSchedule]) -> bytes:
+ schedule_name = "On-Call Schedule for {0}".format(user.username)
+ ical_obj = create_base_icalendar(schedule_name)
+
+ for schedule in schedules:
+ calendars = schedule.get_icalendars()
+ get_user_events_from_calendars(ical_obj, calendars, user)
+
+ return ical_obj.to_ical()
+
+
+DatetimeInterval = namedtuple("DatetimeInterval", ["start", "end"])
+
+
+def list_of_gaps_in_schedule(schedule, start_date, end_date):
+ calendars = schedule.get_icalendars()
+ intervals = []
+ start_datetime = timezone.datetime.combine(start_date, datetime.time.min) + timezone.timedelta(milliseconds=1)
+ start_datetime = start_datetime.astimezone(pytz.UTC)
+ end_datetime = timezone.datetime.combine(end_date, datetime.time.max).astimezone(pytz.UTC)
+
+ for idx, calendar in enumerate(calendars):
+ if calendar is not None:
+ calendar_tz = get_icalendar_tz_or_utc(calendar)
+ events = ical_events.get_events_from_ical_between(
+ calendar,
+ start_datetime,
+ end_datetime,
+ )
+ for event in events:
+ start, end = start_end_with_respect_to_all_day(event, calendar_tz)
+ intervals.append(DatetimeInterval(start, end))
+ return detect_gaps(intervals, start_datetime, end_datetime)
+
+
+def detect_gaps(intervals, start, end):
+ gaps = []
+ intervals = sorted(intervals, key=lambda dt: dt.start)
+ if len(intervals) > 0:
+ base_interval = intervals[0]
+ if base_interval.start > start:
+ gaps.append(DatetimeInterval(None, base_interval.start))
+ for interval in intervals[1:]:
+ overlaps, new_base_interval = merge_if_overlaps(base_interval, interval)
+ if not overlaps:
+ gaps.append(DatetimeInterval(base_interval.end, interval.start))
+ base_interval = new_base_interval
+ if base_interval.end < end:
+ gaps.append(DatetimeInterval(base_interval.end, None))
+ return gaps
+
+
+def merge_if_overlaps(a: DatetimeInterval, b: DatetimeInterval):
+ if a.end >= b.end:
+ return True, DatetimeInterval(a.start, a.end)
+ if b.start - a.end < datetime.timedelta(minutes=1):
+ return True, DatetimeInterval(a.start, b.end)
+ else:
+ return False, DatetimeInterval(b.start, b.end)
+
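+# A worked example of the gap detection above: with shifts 00:00-08:00 and 10:00-18:00
+# inside a 00:00-24:00 window, the intervals don't merge (the 2h break exceeds the
+# one-minute tolerance), so detect_gaps returns
+#   [DatetimeInterval(08:00, 10:00), DatetimeInterval(18:00, None)]
+# where None marks a gap running past the window edge (likewise for leading gaps).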
+
+def start_end_with_respect_to_all_day(event, calendar_tz):
+ start, _ = ical_date_to_datetime(event[ICAL_DATETIME_START].dt, calendar_tz, start=True)
+ end, _ = ical_date_to_datetime(event[ICAL_DATETIME_END].dt, calendar_tz, start=False)
+ return start, end
+
+
+def convert_windows_timezone_to_iana(tz_name):
+ """
+ Conversion info taken from https://raw.githubusercontent.com/unicode-org/cldr/main/common/supplemental/windowsZones.xml
+ Also see https://gist.github.com/mrled/8d29fde758cfc7dd0b52f3bbf2b8f06e
+ NOTE: This mapping could be updated, and it's technically a guess.
+ Also, there could probably be issues with DST for some timezones.
+ """
+ windows_to_iana_map = {
+ "AUS Central Standard Time": "Australia/Darwin",
+ "AUS Eastern Standard Time": "Australia/Sydney",
+ "Afghanistan Standard Time": "Asia/Kabul",
+ "Alaskan Standard Time": "America/Anchorage",
+ "Aleutian Standard Time": "America/Adak",
+ "Altai Standard Time": "Asia/Barnaul",
+ "Arab Standard Time": "Asia/Riyadh",
+ "Arabian Standard Time": "Asia/Dubai",
+ "Arabic Standard Time": "Asia/Baghdad",
+ "Argentina Standard Time": "America/Buenos_Aires",
+ "Astrakhan Standard Time": "Europe/Astrakhan",
+ "Atlantic Standard Time": "America/Halifax",
+ "Aus Central W. Standard Time": "Australia/Eucla",
+ "Azerbaijan Standard Time": "Asia/Baku",
+ "Azores Standard Time": "Atlantic/Azores",
+ "Bahia Standard Time": "America/Bahia",
+ "Bangladesh Standard Time": "Asia/Dhaka",
+ "Belarus Standard Time": "Europe/Minsk",
+ "Bougainville Standard Time": "Pacific/Bougainville",
+ "Canada Central Standard Time": "America/Regina",
+ "Cape Verde Standard Time": "Atlantic/Cape_Verde",
+ "Caucasus Standard Time": "Asia/Yerevan",
+ "Cen. Australia Standard Time": "Australia/Adelaide",
+ "Central America Standard Time": "America/Guatemala",
+ "Central Asia Standard Time": "Asia/Almaty",
+ "Central Brazilian Standard Time": "America/Cuiaba",
+ "Central Europe Standard Time": "Europe/Budapest",
+ "Central European Standard Time": "Europe/Warsaw",
+ "Central Pacific Standard Time": "Pacific/Guadalcanal",
+ "Central Standard Time": "America/Chicago",
+ "Central Standard Time (Mexico)": "America/Mexico_City",
+ "Chatham Islands Standard Time": "Pacific/Chatham",
+ "China Standard Time": "Asia/Shanghai",
+ "Cuba Standard Time": "America/Havana",
+ "Dateline Standard Time": "Etc/GMT+12",
+ "E. Africa Standard Time": "Africa/Nairobi",
+ "E. Australia Standard Time": "Australia/Brisbane",
+ "E. Europe Standard Time": "Europe/Chisinau",
+ "E. South America Standard Time": "America/Sao_Paulo",
+ "Easter Island Standard Time": "Pacific/Easter",
+ "Eastern Standard Time": "America/New_York",
+ "Eastern Standard Time (Mexico)": "America/Cancun",
+ "Egypt Standard Time": "Africa/Cairo",
+ "Ekaterinburg Standard Time": "Asia/Yekaterinburg",
+ "FLE Standard Time": "Europe/Kiev",
+ "Fiji Standard Time": "Pacific/Fiji",
+ "GMT Standard Time": "Europe/London",
+ "GTB Standard Time": "Europe/Bucharest",
+ "Georgian Standard Time": "Asia/Tbilisi",
+ "Greenland Standard Time": "America/Godthab",
+ "Greenwich Standard Time": "Atlantic/Reykjavik",
+ "Haiti Standard Time": "America/Port-au-Prince",
+ "Hawaiian Standard Time": "Pacific/Honolulu",
+ "India Standard Time": "Asia/Calcutta",
+ "Iran Standard Time": "Asia/Tehran",
+ "Israel Standard Time": "Asia/Jerusalem",
+ "Jordan Standard Time": "Asia/Amman",
+ "Kaliningrad Standard Time": "Europe/Kaliningrad",
+ "Korea Standard Time": "Asia/Seoul",
+ "Libya Standard Time": "Africa/Tripoli",
+ "Line Islands Standard Time": "Pacific/Kiritimati",
+ "Lord Howe Standard Time": "Australia/Lord_Howe",
+ "Magadan Standard Time": "Asia/Magadan",
+ "Magallanes Standard Time": "America/Punta_Arenas",
+ "Marquesas Standard Time": "Pacific/Marquesas",
+ "Mauritius Standard Time": "Indian/Mauritius",
+ "Middle East Standard Time": "Asia/Beirut",
+ "Montevideo Standard Time": "America/Montevideo",
+ "Morocco Standard Time": "Africa/Casablanca",
+ "Mountain Standard Time": "America/Denver",
+ "Mountain Standard Time (Mexico)": "America/Chihuahua",
+ "Myanmar Standard Time": "Asia/Rangoon",
+ "N. Central Asia Standard Time": "Asia/Novosibirsk",
+ "Namibia Standard Time": "Africa/Windhoek",
+ "Nepal Standard Time": "Asia/Katmandu",
+ "New Zealand Standard Time": "Pacific/Auckland",
+ "Newfoundland Standard Time": "America/St_Johns",
+ "Norfolk Standard Time": "Pacific/Norfolk",
+ "North Asia East Standard Time": "Asia/Irkutsk",
+ "North Asia Standard Time": "Asia/Krasnoyarsk",
+ "North Korea Standard Time": "Asia/Pyongyang",
+ "Omsk Standard Time": "Asia/Omsk",
+ "Pacific SA Standard Time": "America/Santiago",
+ "Pacific Standard Time": "America/Los_Angeles",
+ "Pacific Standard Time (Mexico)": "America/Tijuana",
+ "Pakistan Standard Time": "Asia/Karachi",
+ "Paraguay Standard Time": "America/Asuncion",
+ "Qyzylorda Standard Time": "Asia/Qyzylorda",
+ "Romance Standard Time": "Europe/Paris",
+ "Russia Time Zone 10": "Asia/Srednekolymsk",
+ "Russia Time Zone 11": "Asia/Kamchatka",
+ "Russia Time Zone 3": "Europe/Samara",
+ "Russian Standard Time": "Europe/Moscow",
+ "SA Eastern Standard Time": "America/Cayenne",
+ "SA Pacific Standard Time": "America/Bogota",
+ "SA Western Standard Time": "America/La_Paz",
+ "SE Asia Standard Time": "Asia/Bangkok",
+ "Saint Pierre Standard Time": "America/Miquelon",
+ "Sakhalin Standard Time": "Asia/Sakhalin",
+ "Samoa Standard Time": "Pacific/Apia",
+ "Sao Tome Standard Time": "Africa/Sao_Tome",
+ "Saratov Standard Time": "Europe/Saratov",
+ "Singapore Standard Time": "Asia/Singapore",
+ "South Africa Standard Time": "Africa/Johannesburg",
+ "South Sudan Standard Time": "Africa/Juba",
+ "Sri Lanka Standard Time": "Asia/Colombo",
+ "Sudan Standard Time": "Africa/Khartoum",
+ "Syria Standard Time": "Asia/Damascus",
+ "Taipei Standard Time": "Asia/Taipei",
+ "Tasmania Standard Time": "Australia/Hobart",
+ "Tocantins Standard Time": "America/Araguaina",
+ "Tokyo Standard Time": "Asia/Tokyo",
+ "Tomsk Standard Time": "Asia/Tomsk",
+ "Tonga Standard Time": "Pacific/Tongatapu",
+ "Transbaikal Standard Time": "Asia/Chita",
+ "Turkey Standard Time": "Europe/Istanbul",
+ "Turks And Caicos Standard Time": "America/Grand_Turk",
+ "US Eastern Standard Time": "America/Indianapolis",
+ "US Mountain Standard Time": "America/Phoenix",
+ "UTC": "Etc/UTC",
+ "UTC+12": "Etc/GMT-12",
+ "UTC+13": "Etc/GMT-13",
+ "UTC-02": "Etc/GMT+2",
+ "UTC-08": "Etc/GMT+8",
+ "UTC-09": "Etc/GMT+9",
+ "UTC-11": "Etc/GMT+11",
+ "Ulaanbaatar Standard Time": "Asia/Ulaanbaatar",
+ "Venezuela Standard Time": "America/Caracas",
+ "Vladivostok Standard Time": "Asia/Vladivostok",
+ "Volgograd Standard Time": "Europe/Volgograd",
+ "W. Australia Standard Time": "Australia/Perth",
+ "W. Central Africa Standard Time": "Africa/Lagos",
+ "W. Europe Standard Time": "Europe/Berlin",
+ "W. Mongolia Standard Time": "Asia/Hovd",
+ "West Asia Standard Time": "Asia/Tashkent",
+ "West Bank Standard Time": "Asia/Hebron",
+ "West Pacific Standard Time": "Pacific/Port_Moresby",
+ "Yakutsk Standard Time": "Asia/Yakutsk",
+ "Yukon Standard Time": "America/Whitehorse",
+ }
+
+ result = windows_to_iana_map.get(tz_name)
+ logger.debug("Converting the timezone from Windows to IANA. '{}' -> '{}'".format(tz_name, result))
+
+ return result
diff --git a/engine/apps/schedules/migrations/0001_squashed_initial.py b/engine/apps/schedules/migrations/0001_squashed_initial.py
new file mode 100644
index 0000000000..8456b7fe78
--- /dev/null
+++ b/engine/apps/schedules/migrations/0001_squashed_initial.py
@@ -0,0 +1,94 @@
+# Generated by Django 3.2.5 on 2022-05-31 14:46
+
+import apps.schedules.models.custom_on_call_shift
+import apps.schedules.models.on_call_schedule
+import django.core.validators
+from django.db import migrations, models
+import django.db.models.deletion
+import uuid
+
+
+class Migration(migrations.Migration):
+
+ initial = True
+
+ dependencies = [
+ ]
+
+ operations = [
+ migrations.CreateModel(
+ name='CustomOnCallShift',
+ fields=[
+ ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
+ ('public_primary_key', models.CharField(default=apps.schedules.models.custom_on_call_shift.generate_public_primary_key_for_custom_oncall_shift, max_length=20, unique=True, validators=[django.core.validators.MinLengthValidator(13)])),
+ ('name', models.CharField(max_length=200)),
+ ('time_zone', models.CharField(default=None, max_length=100, null=True)),
+ ('source', models.IntegerField(choices=[(0, 'web'), (1, 'api'), (2, 'slack'), (3, 'terraform')], default=1)),
+ ('rolling_users', models.JSONField(default=None, null=True)),
+ ('start_rotation_from_user_index', models.PositiveIntegerField(default=None, null=True)),
+ ('uuid', models.UUIDField(default=uuid.uuid4)),
+ ('type', models.IntegerField(choices=[(0, 'Single event'), (1, 'Recurrent event'), (2, 'Rolling users')])),
+ ('start', models.DateTimeField()),
+ ('duration', models.DurationField()),
+ ('frequency', models.IntegerField(choices=[(0, 'Daily'), (1, 'Weekly'), (2, 'Monthly')], default=None, null=True)),
+ ('priority_level', models.IntegerField(default=0)),
+ ('interval', models.IntegerField(default=None, null=True)),
+ ('week_start', models.IntegerField(choices=[(0, 'Monday'), (1, 'Tuesday'), (2, 'Wednesday'), (3, 'Thursday'), (4, 'Friday'), (5, 'Saturday'), (6, 'Sunday')], default=6)),
+ ('by_day', models.JSONField(default=None, null=True)),
+ ('by_month', models.JSONField(default=None, null=True)),
+ ('by_monthday', models.JSONField(default=None, null=True)),
+ ],
+ ),
+ migrations.CreateModel(
+ name='OnCallSchedule',
+ fields=[
+ ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
+ ('public_primary_key', models.CharField(default=apps.schedules.models.on_call_schedule.generate_public_primary_key_for_oncall_schedule_channel, max_length=20, unique=True, validators=[django.core.validators.MinLengthValidator(13)])),
+ ('cached_ical_file_primary', models.TextField(default=None, null=True)),
+ ('prev_ical_file_primary', models.TextField(default=None, null=True)),
+ ('cached_ical_file_overrides', models.TextField(default=None, null=True)),
+ ('prev_ical_file_overrides', models.TextField(default=None, null=True)),
+ ('name', models.CharField(max_length=200)),
+ ('channel', models.CharField(default=None, max_length=100, null=True)),
+ ('current_shifts', models.TextField(default='{}')),
+ ('empty_oncall', models.BooleanField(default=True)),
+ ('notify_oncall_shift_freq', models.IntegerField(choices=[(0, 'Never'), (1, 'Each shift')], default=1)),
+ ('mention_oncall_start', models.BooleanField(default=True)),
+ ('mention_oncall_next', models.BooleanField(default=False)),
+ ('notify_empty_oncall', models.IntegerField(choices=[(0, 'Notify all people in the channel'), (1, 'Mention person from the previous slot'), (2, 'Inform about empty slot')], default=0)),
+ ('has_gaps', models.BooleanField(default=False)),
+ ('gaps_report_sent_at', models.DateField(default=None, null=True)),
+ ('has_empty_shifts', models.BooleanField(default=False)),
+ ('empty_shifts_report_sent_at', models.DateField(default=None, null=True)),
+ ],
+ ),
+ migrations.CreateModel(
+ name='OnCallScheduleCalendar',
+ fields=[
+ ('oncallschedule_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='schedules.oncallschedule')),
+ ('ical_url_overrides', models.CharField(default=None, max_length=500, null=True)),
+ ('ical_file_error_overrides', models.CharField(default=None, max_length=200, null=True)),
+ ('time_zone', models.CharField(default='UTC', max_length=100)),
+ ],
+ options={
+ 'abstract': False,
+ 'base_manager_name': 'objects',
+ },
+ bases=('schedules.oncallschedule',),
+ ),
+ migrations.CreateModel(
+ name='OnCallScheduleICal',
+ fields=[
+ ('oncallschedule_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='schedules.oncallschedule')),
+ ('ical_url_primary', models.CharField(default=None, max_length=500, null=True)),
+ ('ical_file_error_primary', models.CharField(default=None, max_length=200, null=True)),
+ ('ical_url_overrides', models.CharField(default=None, max_length=500, null=True)),
+ ('ical_file_error_overrides', models.CharField(default=None, max_length=200, null=True)),
+ ],
+ options={
+ 'abstract': False,
+ 'base_manager_name': 'objects',
+ },
+ bases=('schedules.oncallschedule',),
+ ),
+ ]
diff --git a/engine/apps/schedules/migrations/0002_squashed_initial.py b/engine/apps/schedules/migrations/0002_squashed_initial.py
new file mode 100644
index 0000000000..d5e25ccd19
--- /dev/null
+++ b/engine/apps/schedules/migrations/0002_squashed_initial.py
@@ -0,0 +1,67 @@
+# Generated by Django 3.2.5 on 2022-05-31 14:46
+
+from django.db import migrations, models
+import django.db.models.deletion
+
+
+class Migration(migrations.Migration):
+
+ initial = True
+
+ dependencies = [
+ ('user_management', '0001_squashed_initial'),
+ ('slack', '0002_squashed_initial'),
+ ('contenttypes', '0002_remove_content_type_name'),
+ ('schedules', '0001_squashed_initial'),
+ ]
+
+ operations = [
+ migrations.AddField(
+ model_name='oncallschedule',
+ name='organization',
+ field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='oncall_schedules', to='user_management.organization'),
+ ),
+ migrations.AddField(
+ model_name='oncallschedule',
+ name='polymorphic_ctype',
+ field=models.ForeignKey(editable=False, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='polymorphic_schedules.oncallschedule_set+', to='contenttypes.contenttype'),
+ ),
+ migrations.AddField(
+ model_name='oncallschedule',
+ name='team',
+ field=models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='oncall_schedules', to='user_management.team'),
+ ),
+ migrations.AddField(
+ model_name='oncallschedule',
+ name='user_group',
+ field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='oncall_schedules', to='slack.slackusergroup'),
+ ),
+ migrations.AddField(
+ model_name='customoncallshift',
+ name='organization',
+ field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='custom_on_call_shifts', to='user_management.organization'),
+ ),
+ migrations.AddField(
+ model_name='customoncallshift',
+ name='team',
+ field=models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='custom_on_call_shifts', to='user_management.team'),
+ ),
+ migrations.AddField(
+ model_name='customoncallshift',
+ name='users',
+ field=models.ManyToManyField(to='user_management.User'),
+ ),
+ migrations.AddField(
+ model_name='oncallschedulecalendar',
+ name='custom_on_call_shifts',
+ field=models.ManyToManyField(related_name='schedules', to='schedules.CustomOnCallShift'),
+ ),
+ migrations.AlterUniqueTogether(
+ name='oncallschedule',
+ unique_together={('name', 'organization')},
+ ),
+ migrations.AlterUniqueTogether(
+ name='customoncallshift',
+ unique_together={('name', 'organization')},
+ ),
+ ]
diff --git a/engine/apps/schedules/migrations/__init__.py b/engine/apps/schedules/migrations/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/engine/apps/schedules/models/__init__.py b/engine/apps/schedules/models/__init__.py
new file mode 100644
index 0000000000..c5a6279099
--- /dev/null
+++ b/engine/apps/schedules/models/__init__.py
@@ -0,0 +1,2 @@
+from .custom_on_call_shift import CustomOnCallShift # noqa: F401
+from .on_call_schedule import OnCallSchedule, OnCallScheduleCalendar, OnCallScheduleICal # noqa: F401
diff --git a/engine/apps/schedules/models/custom_on_call_shift.py b/engine/apps/schedules/models/custom_on_call_shift.py
new file mode 100644
index 0000000000..ffe0951125
--- /dev/null
+++ b/engine/apps/schedules/models/custom_on_call_shift.py
@@ -0,0 +1,338 @@
+import logging
+from calendar import monthrange
+from uuid import uuid4
+
+import pytz
+from django.apps import apps
+from django.conf import settings
+from django.core.validators import MinLengthValidator
+from django.db import models
+from django.db.models import JSONField
+from django.utils import timezone
+from django.utils.functional import cached_property
+from icalendar.cal import Event
+from recurring_ical_events import UnfoldableCalendar
+
+from apps.schedules.tasks import drop_cached_ical_task
+from apps.user_management.models import User
+from common.public_primary_keys import generate_public_primary_key, increase_public_primary_key_length
+
+logger = logging.getLogger(__name__)
+logger.setLevel(logging.DEBUG)
+
+
+def generate_public_primary_key_for_custom_oncall_shift():
+ prefix = "O"
+ new_public_primary_key = generate_public_primary_key(prefix)
+
+ failure_counter = 0
+ while CustomOnCallShift.objects.filter(public_primary_key=new_public_primary_key).exists():
+ new_public_primary_key = increase_public_primary_key_length(
+ failure_counter=failure_counter, prefix=prefix, model_name="CustomOnCallShift"
+ )
+ failure_counter += 1
+
+ return new_public_primary_key
+
+
+class CustomOnCallShift(models.Model):
+ (
+ FREQUENCY_DAILY,
+ FREQUENCY_WEEKLY,
+ FREQUENCY_MONTHLY,
+ ) = range(3)
+
+ FREQUENCY_CHOICES = (
+ (FREQUENCY_DAILY, "Daily"),
+ (FREQUENCY_WEEKLY, "Weekly"),
+ (FREQUENCY_MONTHLY, "Monthly"),
+ )
+
+ PUBLIC_FREQUENCY_CHOICES_MAP = {
+ FREQUENCY_DAILY: "daily",
+ FREQUENCY_WEEKLY: "weekly",
+ FREQUENCY_MONTHLY: "monthly",
+ }
+
+ (
+ TYPE_SINGLE_EVENT,
+ TYPE_RECURRENT_EVENT,
+ TYPE_ROLLING_USERS_EVENT,
+ ) = range(3)
+
+ TYPE_CHOICES = (
+ (TYPE_SINGLE_EVENT, "Single event"),
+ (TYPE_RECURRENT_EVENT, "Recurrent event"),
+ (TYPE_ROLLING_USERS_EVENT, "Rolling users"),
+ )
+
+ PUBLIC_TYPE_CHOICES_MAP = {
+ TYPE_SINGLE_EVENT: "single_event",
+ TYPE_RECURRENT_EVENT: "recurrent_event",
+ TYPE_ROLLING_USERS_EVENT: "rolling_users",
+ }
+
+ (MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY) = range(7)
+
+ WEEKDAY_CHOICES = (
+ (MONDAY, "Monday"),
+ (TUESDAY, "Tuesday"),
+ (WEDNESDAY, "Wednesday"),
+ (THURSDAY, "Thursday"),
+ (FRIDAY, "Friday"),
+ (SATURDAY, "Saturday"),
+ (SUNDAY, "Sunday"),
+ )
+
+ ICAL_WEEKDAY_MAP = {
+ MONDAY: "MO",
+ TUESDAY: "TU",
+ WEDNESDAY: "WE",
+ THURSDAY: "TH",
+ FRIDAY: "FR",
+ SATURDAY: "SA",
+ SUNDAY: "SU",
+ }
+ (
+ SOURCE_WEB,
+ SOURCE_API,
+ SOURCE_SLACK,
+ SOURCE_TERRAFORM,
+ ) = range(4)
+
+ SOURCE_CHOICES = (
+ (SOURCE_WEB, "web"),
+ (SOURCE_API, "api"),
+ (SOURCE_SLACK, "slack"),
+ (SOURCE_TERRAFORM, "terraform"),
+ )
+ public_primary_key = models.CharField(
+ max_length=20,
+ validators=[MinLengthValidator(settings.PUBLIC_PRIMARY_KEY_MIN_LENGTH + 1)],
+ unique=True,
+ default=generate_public_primary_key_for_custom_oncall_shift,
+ )
+
+ organization = models.ForeignKey(
+ "user_management.Organization",
+ on_delete=models.CASCADE,
+ related_name="custom_on_call_shifts",
+ )
+ team = models.ForeignKey(
+ "user_management.Team",
+ on_delete=models.SET_NULL,
+ related_name="custom_on_call_shifts",
+ null=True,
+ default=None,
+ )
+ name = models.CharField(max_length=200)
+ time_zone = models.CharField(max_length=100, null=True, default=None)
+ source = models.IntegerField(choices=SOURCE_CHOICES, default=SOURCE_API)
+ users = models.ManyToManyField("user_management.User") # users in single and recurrent events
+ rolling_users = JSONField(null=True, default=None) # [{user.pk: user.public_primary_key, ...},...]
+ start_rotation_from_user_index = models.PositiveIntegerField(null=True, default=None)
+
+ uuid = models.UUIDField(default=uuid4) # event uuid
+ type = models.IntegerField(choices=TYPE_CHOICES) # "rolling_users", "recurrent_event", "single_event"
+
+ start = models.DateTimeField() # event start datetime
+ duration = models.DurationField() # duration in seconds
+
+ frequency = models.IntegerField(choices=FREQUENCY_CHOICES, null=True, default=None)
+
+ priority_level = models.IntegerField(default=0)
+
+ interval = models.IntegerField(default=None, null=True) # every n days/months - ical format
+
+ # week_start in ical format
+ week_start = models.IntegerField(choices=WEEKDAY_CHOICES, default=SUNDAY) # for weekly events
+
+ by_day = JSONField(
+ default=None, null=True
+ ) # [] BYDAY - (MO, TU); (1MO, -2TU) - for monthly and weekly freq - ical format
+ by_month = JSONField(default=None, null=True) # [] BYMONTH - what months (1, 2, 3, ...) - ical format
+ by_monthday = JSONField(default=None, null=True) # [] BYMONTHDAY - what days of month (1, 2, -3) - ical format
+
+ class Meta:
+ unique_together = ("name", "organization")
+
+ def delete(self, *args, **kwargs):
+ for schedule in self.schedules.all():
+ drop_cached_ical_task.apply_async((schedule.pk,))
+ super().delete(*args, **kwargs)
+
+ @property
+ def repr_settings_for_client_side_logging(self) -> str:
+ """
+ Example of execution:
+ name: Demo recurrent event, team: example, source: terraform, type: Recurrent event, users: Alex,
+ start: 2020-09-10T16:00:00+00:00, duration: 3:00:00, priority level: 0, frequency: Weekly, interval: 2,
+ week start: 6, by day: ['MO', 'WE', 'FR'], by month: None, by monthday: None
+ """
+ if self.type == CustomOnCallShift.TYPE_ROLLING_USERS_EVENT:
+ users_verbal = "empty"
+ if self.rolling_users is not None:
+ users_verbal = ""
+ for users_dict in self.rolling_users:
+ users = self.organization.users.filter(public_primary_key__in=users_dict.values())
+ users_verbal += f"[{', '.join([user.username for user in users])}]"
+ users_verbal = f"rolling users: {users_verbal}"
+ else:
+ users = self.users.all()
+ users_verbal = f"{', '.join([user.username for user in users]) if users else 'empty'}"
+ result = (
+ f"name: {self.name}, team: {self.team.name if self.team else 'No team'},"
+ f"{f' time_zone: {self.time_zone},' if self.time_zone else ''} "
+ f"source: {self.get_source_display()}, type: {self.get_type_display()}, users: {users_verbal}, "
+ f"start: {self.start.isoformat()}, duration: {self.duration}, priority level: {self.priority_level}"
+ )
+ if self.type != CustomOnCallShift.TYPE_SINGLE_EVENT:
+ result += (
+ f", frequency: {self.get_frequency_display()}, interval: {self.interval}, "
+ f"week start: {self.week_start}, by day: {self.by_day}, by month: {self.by_month}, "
+ f"by monthday: {self.by_monthday}"
+ )
+ return result
+
+ def convert_to_ical(self, time_zone="UTC"):
+ result = ""
+ # use shift time_zone if it exists, otherwise use schedule or default time_zone
+ time_zone = self.time_zone if self.time_zone is not None else time_zone
+ # rolling_users shift converts to several ical events
+ if self.type == CustomOnCallShift.TYPE_ROLLING_USERS_EVENT:
+ event_ical = None
+ users_queue = self.get_rolling_users()
+ for counter, users in enumerate(users_queue, start=1):
+ start = self.get_next_start_date(event_ical)
+ for user_counter, user in enumerate(users, start=1):
+ event_ical = self.generate_ical(user, start, user_counter, counter, time_zone)
+ result += event_ical
+ else:
+ for user_counter, user in enumerate(self.users.all(), start=1):
+ result += self.generate_ical(user, self.start, user_counter, time_zone=time_zone)
+ return result
+
+ def generate_ical(self, user, start, user_counter, counter=1, time_zone="UTC"):
+        # create a separate event for each user, since multiple users can't be parsed from a single ical summary
+ event = Event()
+ event["uid"] = f"amixr-{self.uuid}-U{user_counter}-E{counter}-S{self.source}"
+ event.add("summary", self.get_summary_with_user_for_ical(user))
+ event.add("dtstart", self.convert_dt_to_schedule_timezone(start, time_zone))
+ event.add("dtend", self.convert_dt_to_schedule_timezone(start + self.duration, time_zone))
+ event.add("dtstamp", timezone.now())
+ if self.event_ical_rules:
+ event.add("rrule", self.event_ical_rules)
+ try:
+ event_in_ical = event.to_ical().decode("utf-8")
+ except ValueError as e:
+ logger.warning(f"Cannot convert event with pk {self.pk} to ical: {str(e)}")
+ event_in_ical = ""
+ return event_in_ical
+
+ def get_summary_with_user_for_ical(self, user: User) -> str:
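+        # Round-trip note: with priority_level=1 and user "alex" this yields "[L1] alex ",
+        # which parse_priority_from_string / parse_username_from_string in ical_utils
+        # read back as priority 1 and username "alex".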
+ summary = ""
+ if self.priority_level > 0:
+ summary += f"[L{self.priority_level}] "
+ summary += f"{user.username} "
+ return summary
+
+ def get_next_start_date(self, event_ical):
+ """Get date of the next event (for rolling_users shifts)"""
+ if event_ical is None:
+ return self.start
+ current_event = Event.from_ical(event_ical)
+        # use the shift interval, not the event interval: for rolling_users shifts they are not the same
+ current_event["rrule"]["INTERVAL"] = self.interval or 1
+ current_event_start = current_event["DTSTART"].dt
+ next_event_start = current_event_start
+ ONE_DAY = 1
+
+ if self.frequency == CustomOnCallShift.FREQUENCY_DAILY:
+            # for daily frequency the next candidate start is simply the following day
+ next_event_start = current_event_start + timezone.timedelta(days=ONE_DAY)
+ elif self.frequency == CustomOnCallShift.FREQUENCY_WEEKLY:
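+            # Worked example: if the current event starts on a Wednesday (weekday() == 2)
+            # and week_start is SUNDAY (6), then days_for_next_event = 7 - 2 + 6 = 11 > 7,
+            # reduced to 11 % 7 = 4, so the next candidate start is the following Sunday.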
+ DAYS_IN_A_WEEK = 7
+ days_for_next_event = DAYS_IN_A_WEEK - current_event_start.weekday() + self.week_start
+ if days_for_next_event > DAYS_IN_A_WEEK:
+ days_for_next_event = days_for_next_event % DAYS_IN_A_WEEK
+ next_event_start = current_event_start + timezone.timedelta(days=days_for_next_event)
+ elif self.frequency == CustomOnCallShift.FREQUENCY_MONTHLY:
+ DAYS_IN_A_MONTH = monthrange(self.start.year, self.start.month)[1]
+ # count days before the next month starts
+ days_for_next_event = DAYS_IN_A_MONTH - current_event_start.day + ONE_DAY
+ if days_for_next_event > DAYS_IN_A_MONTH:
+ days_for_next_event = days_for_next_event % DAYS_IN_A_MONTH
+ next_event_start = current_event_start + timezone.timedelta(days=days_for_next_event)
+ next_event = None
+        # repetitions generate the next event shift according to the recurrence rules
+ repetitions = UnfoldableCalendar(current_event).RepeatedEvent(
+ current_event, next_event_start.replace(microsecond=0)
+ )
+        for event in repetitions:
+ if event.start.date() >= next_event_start.date():
+ next_event = event
+ break
+ next_event_dt = next_event.start
+ return next_event_dt
+
+ @cached_property
+ def event_ical_rules(self):
+ # e.g. {'freq': ['WEEKLY'], 'interval': [2], 'byday': ['MO', 'WE', 'FR'], 'wkst': ['SU']}
+ rules = {}
+ if self.frequency is not None:
+ rules["freq"] = [self.get_frequency_display().upper()]
+ if self.event_interval is not None:
+ rules["interval"] = [self.event_interval]
+ if self.by_day is not None:
+ rules["byday"] = self.by_day
+ if self.by_month is not None:
+ rules["bymonth"] = self.by_month
+ if self.by_monthday is not None:
+ rules["bymonthday"] = self.by_monthday
+ if self.week_start is not None:
+ rules["wkst"] = CustomOnCallShift.ICAL_WEEKDAY_MAP[self.week_start]
+ return rules
+
+ @cached_property
+ def event_interval(self):
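+        # For rolling_users shifts every user group gets its own event, so the per-event
+        # interval is scaled by the number of groups: e.g. (hypothetically) three groups
+        # with a shift interval of 2 produce an iCal INTERVAL of 6 for each event.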
+ if self.type == CustomOnCallShift.TYPE_ROLLING_USERS_EVENT:
+ if self.rolling_users:
+ if self.interval is not None:
+ return self.interval * len(self.rolling_users)
+ else:
+ return len(self.rolling_users)
+ return self.interval
+
+ def convert_dt_to_schedule_timezone(self, dt, time_zone):
+ start_naive = dt.replace(tzinfo=None)
+ return pytz.timezone(time_zone).localize(start_naive, is_dst=None)
+
+ def get_rolling_users(self):
+ User = apps.get_model("user_management", "User")
+ all_users_pks = set()
+ users_queue = []
+ if self.rolling_users is not None:
+ # get all users pks from rolling_users field
+ for users_dict in self.rolling_users:
+ all_users_pks.update(users_dict.keys())
+ users = User.objects.filter(pk__in=all_users_pks)
+ # generate users_queue list with user objects
+ if self.start_rotation_from_user_index is not None:
+ rolling_users = (
+ self.rolling_users[self.start_rotation_from_user_index :]
+ + self.rolling_users[: self.start_rotation_from_user_index]
+ )
+ else:
+ rolling_users = self.rolling_users
+ for users_dict in rolling_users:
+ users_list = list(users.filter(pk__in=users_dict.keys()))
+ users_queue.append(users_list)
+ return users_queue
+
+ def add_rolling_users(self, rolling_users_list):
+ result = []
+ for users in rolling_users_list:
+ result.append({user.pk: user.public_primary_key for user in users})
+ self.rolling_users = result
+ self.save(update_fields=["rolling_users"])
diff --git a/engine/apps/schedules/models/on_call_schedule.py b/engine/apps/schedules/models/on_call_schedule.py
new file mode 100644
index 0000000000..4e7e79dd71
--- /dev/null
+++ b/engine/apps/schedules/models/on_call_schedule.py
@@ -0,0 +1,395 @@
+import icalendar
+from django.apps import apps
+from django.conf import settings
+from django.core.validators import MinLengthValidator
+from django.db import models
+from django.utils import timezone
+from django.utils.functional import cached_property
+from icalendar.cal import Calendar
+from polymorphic.managers import PolymorphicManager
+from polymorphic.models import PolymorphicModel
+from polymorphic.query import PolymorphicQuerySet
+
+from apps.schedules.ical_utils import (
+ fetch_ical_file_or_get_error,
+ list_of_empty_shifts_in_schedule,
+ list_of_gaps_in_schedule,
+ list_users_to_notify_from_ical,
+)
+from common.public_primary_keys import generate_public_primary_key, increase_public_primary_key_length
+
+
+def generate_public_primary_key_for_oncall_schedule_channel():
+ prefix = "S"
+ new_public_primary_key = generate_public_primary_key(prefix)
+
+ failure_counter = 0
+ while OnCallSchedule.objects.filter(public_primary_key=new_public_primary_key).exists():
+ new_public_primary_key = increase_public_primary_key_length(
+ failure_counter=failure_counter, prefix=prefix, model_name="OnCallSchedule"
+ )
+ failure_counter += 1
+
+ return new_public_primary_key
+
+
+class OnCallScheduleQuerySet(PolymorphicQuerySet):
+ def get_oncall_users(self, events_datetime=None):
+ if events_datetime is None:
+ events_datetime = timezone.datetime.now(timezone.utc)
+
+ users = set()
+
+ for schedule in self.all():
+ schedule_oncall_users = list_users_to_notify_from_ical(schedule, events_datetime=events_datetime)
+ if schedule_oncall_users is None:
+ continue
+
+ users.update(schedule_oncall_users)
+
+ return list(users)
+
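+# Usage sketch (hypothetical): since the manager below is built from this queryset,
+#   OnCallSchedule.objects.filter(organization=some_org).get_oncall_users()
+# returns the de-duplicated list of users currently on call across those schedules.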
+
+class OnCallSchedule(PolymorphicModel):
+ objects = PolymorphicManager.from_queryset(OnCallScheduleQuerySet)()
+
+ # type of calendars in schedule
+    TYPE_ICAL_PRIMARY, TYPE_ICAL_OVERRIDES, TYPE_CALENDAR = range(3)
+    # TODO: discuss whether we need the third type (these types are used by the frontend)
+ PRIMARY, OVERRIDES = range(2)
+ CALENDAR_TYPE_VERBAL = {PRIMARY: "primary", OVERRIDES: "overrides"}
+
+ public_primary_key = models.CharField(
+ max_length=20,
+ validators=[MinLengthValidator(settings.PUBLIC_PRIMARY_KEY_MIN_LENGTH + 1)],
+ unique=True,
+ default=generate_public_primary_key_for_oncall_schedule_channel,
+ )
+
+ cached_ical_file_primary = models.TextField(null=True, default=None)
+ prev_ical_file_primary = models.TextField(null=True, default=None)
+
+ cached_ical_file_overrides = models.TextField(null=True, default=None)
+ prev_ical_file_overrides = models.TextField(null=True, default=None)
+
+ organization = models.ForeignKey(
+ "user_management.Organization", on_delete=models.CASCADE, related_name="oncall_schedules"
+ )
+
+ team = models.ForeignKey(
+ "user_management.Team",
+ on_delete=models.SET_NULL,
+ related_name="oncall_schedules",
+ null=True,
+ default=None,
+ )
+
+ name = models.CharField(max_length=200)
+ channel = models.CharField(max_length=100, null=True, default=None)
+
+ # Slack user group to be updated when on-call users change for this schedule
+ user_group = models.ForeignKey(
+ to="slack.SlackUserGroup", null=True, on_delete=models.SET_NULL, related_name="oncall_schedules"
+ )
+
+ # schedule reminder related fields
+ class NotifyOnCallShiftFreq(models.IntegerChoices):
+ NEVER = 0, "Never"
+ EACH_SHIFT = 1, "Each shift"
+
+ class NotifyEmptyOnCall(models.IntegerChoices):
+ ALL = 0, "Notify all people in the channel"
+ PREV = 1, "Mention person from the previous slot"
+ NO_ONE = 2, "Inform about empty slot"
+
+ current_shifts = models.TextField(null=False, default="{}")
+    # Used to avoid dropping current_shifts so they can be reused for "Mention person from the previous slot"
+ empty_oncall = models.BooleanField(default=True)
+ notify_oncall_shift_freq = models.IntegerField(
+ null=False,
+ choices=NotifyOnCallShiftFreq.choices,
+ default=NotifyOnCallShiftFreq.EACH_SHIFT,
+ )
+ mention_oncall_start = models.BooleanField(null=False, default=True)
+ mention_oncall_next = models.BooleanField(null=False, default=False)
+ notify_empty_oncall = models.IntegerField(
+ null=False, choices=NotifyEmptyOnCall.choices, default=NotifyEmptyOnCall.ALL
+ )
+
+ # Gaps-checker related fields
+ has_gaps = models.BooleanField(default=False)
+ gaps_report_sent_at = models.DateField(null=True, default=None)
+
+ # empty shifts checker related fields
+ has_empty_shifts = models.BooleanField(default=False)
+ empty_shifts_report_sent_at = models.DateField(null=True, default=None)
+
+ class Meta:
+ unique_together = ("name", "organization")
+
+ @property
+ def repr_settings_for_client_side_logging(self):
+ """
+ Example of execution:
+ name: test, team: example, url: None
+ slack reminder settings: notification frequency: Each shift, current shift notification: Yes,
+ next shift notification: No, action for slot when no one is on-call: Notify all people in the channel
+ """
+ result = f"name: {self.name}, team: {self.team.name if self.team else 'No team'}"
+
+ if self.organization.slack_team_identity:
+ if self.channel:
+ SlackChannel = apps.get_model("slack", "SlackChannel")
+ sti = self.organization.slack_team_identity
+ slack_channel = SlackChannel.objects.filter(slack_team_identity=sti, slack_id=self.channel).first()
+ if slack_channel:
+ result += f", slack channel: {slack_channel.name}"
+
+ if self.user_group is not None:
+ result += f", user group: {self.user_group.handle}"
+
+ result += (
+ f"\nslack reminder settings: "
+ f"notification frequency: {self.get_notify_oncall_shift_freq_display()}, "
+ f"current shift notification: {'Yes' if self.mention_oncall_start else 'No'}, "
+ f"next shift notification: {'Yes' if self.mention_oncall_next else 'No'}, "
+ f"action for slot when no one is on-call: {self.get_notify_empty_oncall_display()}"
+ )
+ return result
+
+ def get_icalendars(self):
+ """Returns list of calendars. Primary calendar should always be the first"""
+ calendar_primary = None
+ calendar_overrides = None
+ if self._ical_file_primary is not None:
+ calendar_primary = icalendar.Calendar.from_ical(self._ical_file_primary)
+ if self._ical_file_overrides is not None:
+ calendar_overrides = icalendar.Calendar.from_ical(self._ical_file_overrides)
+ return calendar_primary, calendar_overrides
+
+ def get_prev_and_current_ical_files(self):
+ """Returns list of tuples with prev and current iCal files for each calendar"""
+ return [
+ (self.prev_ical_file_primary, self.cached_ical_file_primary),
+ (self.prev_ical_file_overrides, self.cached_ical_file_overrides),
+ ]
+
+ def check_gaps_for_next_week(self):
+ today = timezone.now().date()
+ gaps = list_of_gaps_in_schedule(self, today, today + timezone.timedelta(days=7))
+ has_gaps = len(gaps) != 0
+ self.has_gaps = has_gaps
+ self.save(update_fields=["has_gaps"])
+ return has_gaps
+
+ def check_empty_shifts_for_next_week(self):
+ today = timezone.now().date()
+ empty_shifts = list_of_empty_shifts_in_schedule(self, today, today + timezone.timedelta(days=7))
+ has_empty_shifts = len(empty_shifts) != 0
+ self.has_empty_shifts = has_empty_shifts
+ self.save(update_fields=["has_empty_shifts"])
+ return has_empty_shifts
+
+ def drop_cached_ical(self):
+ self._drop_primary_ical_file()
+ self._drop_overrides_ical_file()
+
+ def refresh_ical_file(self):
+ self._refresh_primary_ical_file()
+ self._refresh_overrides_ical_file()
+
+ def _ical_file_primary(self):
+ raise NotImplementedError
+
+ def _ical_file_overrides(self):
+ raise NotImplementedError
+
+ def _refresh_primary_ical_file(self):
+ raise NotImplementedError
+
+ def _refresh_overrides_ical_file(self):
+ raise NotImplementedError
+
+ def _drop_primary_ical_file(self):
+ raise NotImplementedError
+
+ def _drop_overrides_ical_file(self):
+ raise NotImplementedError
+
+
+class OnCallScheduleICal(OnCallSchedule):
+ # For the ical schedule both primary and overrides icals are imported via ical url
+ ical_url_primary = models.CharField(max_length=500, null=True, default=None)
+ ical_file_error_primary = models.CharField(max_length=200, null=True, default=None)
+
+ ical_url_overrides = models.CharField(max_length=500, null=True, default=None)
+ ical_file_error_overrides = models.CharField(max_length=200, null=True, default=None)
+
+ @cached_property
+ def _ical_file_primary(self):
+ """
+        Return the cached iCal file, downloading it from the external calendar URL if it is not cached yet
+ """
+ cached_ical_file = self.cached_ical_file_primary
+ if self.ical_url_primary is not None and self.cached_ical_file_primary is None:
+ self.cached_ical_file_primary, self.ical_file_error_primary = fetch_ical_file_or_get_error(
+ self.ical_url_primary
+ )
+ self.save(update_fields=["cached_ical_file_primary", "ical_file_error_primary"])
+ cached_ical_file = self.cached_ical_file_primary
+ return cached_ical_file
+
+ @cached_property
+ def _ical_file_overrides(self):
+ """
+        Return the cached iCal file, downloading it from the external calendar URL if it is not cached yet
+ """
+ cached_ical_file = self.cached_ical_file_overrides
+ if self.ical_url_overrides is not None and self.cached_ical_file_overrides is None:
+ self.cached_ical_file_overrides, self.ical_file_error_overrides = fetch_ical_file_or_get_error(
+ self.ical_url_overrides
+ )
+ self.save(update_fields=["cached_ical_file_overrides", "ical_file_error_overrides"])
+ cached_ical_file = self.cached_ical_file_overrides
+ return cached_ical_file
+
+ def _drop_primary_ical_file(self):
+ self.prev_ical_file_primary = self.cached_ical_file_primary
+ self.cached_ical_file_primary = None
+ self.save(
+ update_fields=[
+ "cached_ical_file_primary",
+ "prev_ical_file_primary",
+ ]
+ )
+
+ def _drop_overrides_ical_file(self):
+ self.prev_ical_file_overrides = self.cached_ical_file_overrides
+ self.cached_ical_file_overrides = None
+ self.save(
+ update_fields=[
+ "cached_ical_file_overrides",
+ "prev_ical_file_overrides",
+ ]
+ )
+
+ def _refresh_primary_ical_file(self):
+ self.prev_ical_file_primary = self.cached_ical_file_primary
+ if self.ical_url_primary is not None:
+ self.cached_ical_file_primary, self.ical_file_error_primary = fetch_ical_file_or_get_error(
+ self.ical_url_primary,
+ )
+ self.save(update_fields=["cached_ical_file_primary", "prev_ical_file_primary", "ical_file_error_primary"])
+
+    def _refresh_overrides_ical_file(self):
+        self.prev_ical_file_overrides = self.cached_ical_file_overrides
+        if self.ical_url_overrides is not None:
+ self.cached_ical_file_overrides, self.ical_file_error_overrides = fetch_ical_file_or_get_error(
+ self.ical_url_overrides,
+ )
+ self.save(update_fields=["cached_ical_file_overrides", "prev_ical_file_overrides", "ical_file_error_overrides"])
+
+ @property
+ def repr_settings_for_client_side_logging(self):
+ result = super().repr_settings_for_client_side_logging
+ result += (
+ f", primary calendar url: {self.ical_url_primary}, " f"overrides calendar url: {self.ical_url_overrides}"
+ )
+ return result
+
+
+class OnCallScheduleCalendar(OnCallSchedule):
+ # For the calendar schedule only overrides ical is imported via ical url.
+ ical_url_overrides = models.CharField(max_length=500, null=True, default=None)
+ ical_file_error_overrides = models.CharField(max_length=200, null=True, default=None)
+
+ # Primary ical is generated from custom_on_call_shifts.
+ time_zone = models.CharField(max_length=100, default="UTC")
+ custom_on_call_shifts = models.ManyToManyField("schedules.CustomOnCallShift", related_name="schedules")
+
+ @cached_property
+ def _ical_file_primary(self):
+ """
+ Return cached ical file with iCal events from custom on-call shifts
+ """
+ if self.cached_ical_file_primary is None:
+ self.cached_ical_file_primary = self._generate_ical_file_primary()
+ self.save(update_fields=["cached_ical_file_primary"])
+ return self.cached_ical_file_primary
+
+ @cached_property
+ def _ical_file_overrides(self):
+ """
+        Return the cached iCal file, downloading it from the external calendar URL if it is not cached yet
+ """
+ cached_ical_file = self.cached_ical_file_overrides
+ if self.ical_url_overrides is not None and self.cached_ical_file_overrides is None:
+ self.cached_ical_file_overrides, self.ical_file_error_overrides = fetch_ical_file_or_get_error(
+ self.ical_url_overrides
+ )
+ self.save(update_fields=["cached_ical_file_overrides", "ical_file_error_overrides"])
+ cached_ical_file = self.cached_ical_file_overrides
+ return cached_ical_file
+
+ def _refresh_primary_ical_file(self):
+ self.prev_ical_file_primary = self.cached_ical_file_primary
+ self.cached_ical_file_primary = self._generate_ical_file_primary()
+ self.save(
+ update_fields=[
+ "cached_ical_file_primary",
+ "prev_ical_file_primary",
+ ]
+ )
+
+ def _refresh_overrides_ical_file(self):
+ self.prev_ical_file_overrides = self.cached_ical_file_overrides
+ if self.ical_url_overrides is not None:
+ self.cached_ical_file_overrides, self.ical_file_error_overrides = fetch_ical_file_or_get_error(
+ self.ical_url_overrides,
+ )
+ self.save(update_fields=["cached_ical_file_overrides", "prev_ical_file_overrides", "ical_file_error_overrides"])
+
+ def _drop_primary_ical_file(self):
+ self.prev_ical_file_primary = self.cached_ical_file_primary
+ self.cached_ical_file_primary = None
+ self.save(
+ update_fields=[
+ "cached_ical_file_primary",
+ "prev_ical_file_primary",
+ ]
+ )
+
+ def _drop_overrides_ical_file(self):
+ self.prev_ical_file_overrides = self.cached_ical_file_overrides
+ self.cached_ical_file_overrides = None
+ self.save(
+ update_fields=[
+ "cached_ical_file_overrides",
+ "prev_ical_file_overrides",
+ ]
+ )
+
+ def _generate_ical_file_primary(self):
+ """
+        Generate an iCal file with events from custom on-call shifts (created via API)
+ """
+ ical = None
+ if self.custom_on_call_shifts.exists():
+ end_line = "END:VCALENDAR"
+ calendar = Calendar()
+ calendar.add("prodid", "-//My calendar product//amixr//")
+ calendar.add("version", "2.0")
+ calendar.add("method", "PUBLISH")
+ ical_file = calendar.to_ical().decode()
+ ical = ical_file.replace(end_line, "").strip()
+ ical = f"{ical}\r\n"
+ for event in self.custom_on_call_shifts.all():
+ ical += event.convert_to_ical(self.time_zone)
+ ical += f"{end_line}\r\n"
+ return ical
+
+ @property
+ def repr_settings_for_client_side_logging(self):
+ result = super().repr_settings_for_client_side_logging
+ result += f", overrides calendar url: {self.ical_url_overrides}"
+ return result
diff --git a/engine/apps/schedules/tasks/__init__.py b/engine/apps/schedules/tasks/__init__.py
new file mode 100644
index 0000000000..9bc75ff5c3
--- /dev/null
+++ b/engine/apps/schedules/tasks/__init__.py
@@ -0,0 +1,16 @@
+from .drop_cached_ical import drop_cached_ical_for_custom_events_for_organization, drop_cached_ical_task # noqa: F401
+from .notify_about_empty_shifts_in_schedule import ( # noqa: F401
+ check_empty_shifts_in_schedule,
+ notify_about_empty_shifts_in_schedule,
+ schedule_notify_about_empty_shifts_in_schedule,
+ start_check_empty_shifts_in_schedule,
+ start_notify_about_empty_shifts_in_schedule,
+)
+from .notify_about_gaps_in_schedule import ( # noqa: F401
+ check_gaps_in_schedule,
+ notify_about_gaps_in_schedule,
+ schedule_notify_about_gaps_in_schedule,
+ start_check_gaps_in_schedule,
+ start_notify_about_gaps_in_schedule,
+)
+from .refresh_ical_files import refresh_ical_file, start_refresh_ical_files # noqa: F401
diff --git a/engine/apps/schedules/tasks/drop_cached_ical.py b/engine/apps/schedules/tasks/drop_cached_ical.py
new file mode 100644
index 0000000000..5b7bcb1c65
--- /dev/null
+++ b/engine/apps/schedules/tasks/drop_cached_ical.py
@@ -0,0 +1,30 @@
+from celery.utils.log import get_task_logger
+from django.apps import apps
+
+from common.custom_celery_tasks import shared_dedicated_queue_retry_task
+
+task_logger = get_task_logger(__name__)
+
+
+@shared_dedicated_queue_retry_task(autoretry_for=(Exception,), retry_backoff=True, max_retries=1)
+def drop_cached_ical_task(schedule_pk):
+ OnCallSchedule = apps.get_model("schedules", "OnCallSchedule")
+
+ task_logger.info(f"Start drop_cached_ical_task for schedule {schedule_pk}")
+ try:
+ schedule = OnCallSchedule.objects.get(pk=schedule_pk)
+ schedule.drop_cached_ical()
+ except OnCallSchedule.DoesNotExist:
+ task_logger.info(f"Tried to drop_cached_ical_task for non-existing schedule {schedule_pk}")
+ task_logger.info(f"Finish drop_cached_ical_task for schedule {schedule_pk}")
+
+
+@shared_dedicated_queue_retry_task(autoretry_for=(Exception,), retry_backoff=True, max_retries=1)
+def drop_cached_ical_for_custom_events_for_organization(organization_id):
+ OnCallScheduleCalendar = apps.get_model("schedules", "OnCallScheduleCalendar")
+
+ for schedule in OnCallScheduleCalendar.objects.filter(organization_id=organization_id):
+ drop_cached_ical_task.apply_async(
+ (schedule.pk,),
+ )
+ task_logger.info(f"drop cached ica for org_id {organization_id}")
diff --git a/engine/apps/schedules/tasks/notify_about_empty_shifts_in_schedule.py b/engine/apps/schedules/tasks/notify_about_empty_shifts_in_schedule.py
new file mode 100644
index 0000000000..5d681bb768
--- /dev/null
+++ b/engine/apps/schedules/tasks/notify_about_empty_shifts_in_schedule.py
@@ -0,0 +1,140 @@
+import pytz
+from celery.utils.log import get_task_logger
+from django.apps import apps
+from django.core.cache import cache
+from django.utils import timezone
+
+from apps.public_api.constants import DEMO_SCHEDULE_ID_CALENDAR, DEMO_SCHEDULE_ID_ICAL
+from apps.schedules.ical_utils import list_of_empty_shifts_in_schedule
+from apps.slack.utils import format_datetime_to_slack, post_message_to_channel
+from common.custom_celery_tasks import shared_dedicated_queue_retry_task
+from common.utils import trim_if_needed
+
+task_logger = get_task_logger(__name__)
+
+
+@shared_dedicated_queue_retry_task()
+def start_check_empty_shifts_in_schedule():
+ OnCallSchedule = apps.get_model("schedules", "OnCallSchedule")
+
+ task_logger.info("Start start_notify_about_empty_shifts_in_schedule")
+
+ schedules = OnCallSchedule.objects.exclude(
+ public_primary_key__in=(DEMO_SCHEDULE_ID_CALENDAR, DEMO_SCHEDULE_ID_ICAL)
+ )
+
+ for schedule in schedules:
+ check_empty_shifts_in_schedule.apply_async((schedule.pk,))
+
+ task_logger.info("Finish start_notify_about_empty_shifts_in_schedule")
+
+
+@shared_dedicated_queue_retry_task()
+def check_empty_shifts_in_schedule(schedule_pk):
+ OnCallSchedule = apps.get_model("schedules", "OnCallSchedule")
+
+ task_logger.info(f"Start check_empty_shifts_in_schedule {schedule_pk}")
+
+ try:
+ schedule = OnCallSchedule.objects.get(
+ pk=schedule_pk,
+ )
+ except OnCallSchedule.DoesNotExist:
+ task_logger.info(f"Tried to check_empty_shifts_in_schedule for non-existing schedule {schedule_pk}")
+ return
+
+ schedule.check_empty_shifts_for_next_week()
+ task_logger.info(f"Finish check_empty_shifts_in_schedule {schedule_pk}")
+
+
+@shared_dedicated_queue_retry_task()
+def start_notify_about_empty_shifts_in_schedule():
+ OnCallSchedule = apps.get_model("schedules", "OnCallScheduleICal")
+
+ task_logger.info("Start start_notify_about_empty_shifts_in_schedule")
+
+ today = timezone.now().date()
+ week_ago = today - timezone.timedelta(days=7)
+    schedules = OnCallScheduleICal.objects.filter(
+ empty_shifts_report_sent_at__lte=week_ago,
+ channel__isnull=False,
+ ).exclude(public_primary_key__in=(DEMO_SCHEDULE_ID_CALENDAR, DEMO_SCHEDULE_ID_ICAL))
+
+ for schedule in schedules:
+ notify_about_empty_shifts_in_schedule.apply_async((schedule.pk,))
+
+ task_logger.info("Finish start_notify_about_empty_shifts_in_schedule")
+
+
+@shared_dedicated_queue_retry_task()
+def notify_about_empty_shifts_in_schedule(schedule_pk):
+ OnCallSchedule = apps.get_model("schedules", "OnCallSchedule")
+
+ task_logger.info(f"Start notify_about_empty_shifts_in_schedule {schedule_pk}")
+
+ cache_key = get_cache_key_notify_about_empty_shifts_in_schedule(schedule_pk)
+ cached_task_id = cache.get(cache_key)
+ current_task_id = notify_about_empty_shifts_in_schedule.request.id
+ if current_task_id != cached_task_id and cached_task_id is not None:
+ return
+ try:
+ schedule = OnCallSchedule.objects.get(pk=schedule_pk, channel__isnull=False)
+ except OnCallSchedule.DoesNotExist:
+ task_logger.info(f"Tried to notify_about_empty_shifts_in_schedule for non-existing schedule {schedule_pk}")
+ return
+
+ today = timezone.now().date()
+ empty_shifts = list_of_empty_shifts_in_schedule(schedule, today, today + timezone.timedelta(days=7))
+ schedule.empty_shifts_report_sent_at = today
+
+ if empty_shifts:
+ schedule.has_empty_shifts = True
+ text = (
+ f'Tried to parse schedule *"{schedule.name}"* and found events without associated users.\n'
+ f"To ensure you don't miss any notifications, use a Grafana username as the event name in the calendar. "
+ f"The user should have Editor or Admin access.\n\n"
+ )
+ for idx, empty_shift in enumerate(empty_shifts):
+ start_timestamp = int(empty_shift.start.astimezone(pytz.UTC).timestamp())
+ end_timestamp = int(empty_shift.end.astimezone(pytz.UTC).timestamp())
+
+ if empty_shift.summary:
+ text += f"*Title*: {trim_if_needed(empty_shift.summary)}\n"
+ if empty_shift.description:
+ text += f"*Description*: {trim_if_needed(empty_shift.description)}\n"
+ if empty_shift.attendee:
+ text += f"*Parsed from PagerDuty*: {trim_if_needed(empty_shift.attendee)}\n"
+
+ if empty_shift.all_day:
+ if empty_shift.start.day == empty_shift.end.day:
+ all_day_text = f'{empty_shift.start.strftime("%b %d")}\n'
+ else:
+ all_day_text = (
+ f'From {empty_shift.start.strftime("%b %d")} to {empty_shift.end.strftime("%b %d")}\n'
+ )
+ text += all_day_text
+ text += f"*All-day* event in {empty_shift.calendar_tz} TZ\n"
+ else:
+ text += f"From {format_datetime_to_slack(start_timestamp)} to {format_datetime_to_slack(end_timestamp)} (your TZ)\n"
+ text += f"_From {OnCallSchedule.CALENDAR_TYPE_VERBAL[empty_shift.calendar_type]} calendar_\n"
+ if idx != len(empty_shifts) - 1:
+ text += "\n\n"
+ post_message_to_channel(schedule.organization, schedule.channel, text)
+ else:
+ schedule.has_empty_shifts = False
+ schedule.save(update_fields=["empty_shifts_report_sent_at", "has_empty_shifts"])
+ task_logger.info(f"Finish notify_about_empty_shifts_in_schedule {schedule_pk}")
+
+
+def get_cache_key_notify_about_empty_shifts_in_schedule(schedule_pk):
+ CACHE_KEY_PREFIX = "notify_about_empty_shifts_in_schedule"
+ return f"{CACHE_KEY_PREFIX}_{schedule_pk}"
+
+
+@shared_dedicated_queue_retry_task()
+def schedule_notify_about_empty_shifts_in_schedule(schedule_pk):
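+ # Debounce helper: start the notification task after a short delay and cache
+ # its id; only the most recently scheduled task passes the id check in
+ # notify_about_empty_shifts_in_schedule above.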
+ CACHE_LIFETIME = 600
+ START_TASK_DELAY = 60
+ task = notify_about_empty_shifts_in_schedule.apply_async(args=[schedule_pk], countdown=START_TASK_DELAY)
+ cache_key = get_cache_key_notify_about_empty_shifts_in_schedule(schedule_pk)
+ cache.set(cache_key, task.id, timeout=CACHE_LIFETIME)
diff --git a/engine/apps/schedules/tasks/notify_about_gaps_in_schedule.py b/engine/apps/schedules/tasks/notify_about_gaps_in_schedule.py
new file mode 100644
index 0000000000..4a4749f67d
--- /dev/null
+++ b/engine/apps/schedules/tasks/notify_about_gaps_in_schedule.py
@@ -0,0 +1,123 @@
+import pytz
+from celery.utils.log import get_task_logger
+from django.apps import apps
+from django.core.cache import cache
+from django.utils import timezone
+
+from apps.public_api.constants import DEMO_SCHEDULE_ID_CALENDAR, DEMO_SCHEDULE_ID_ICAL
+from apps.schedules.ical_utils import list_of_gaps_in_schedule
+from apps.slack.utils import format_datetime_to_slack, post_message_to_channel
+from common.custom_celery_tasks import shared_dedicated_queue_retry_task
+
+task_logger = get_task_logger(__name__)
+
+
+@shared_dedicated_queue_retry_task()
+def start_check_gaps_in_schedule():
+ OnCallSchedule = apps.get_model("schedules", "OnCallSchedule")
+
+ task_logger.info("Start start_check_gaps_in_schedule")
+
+ schedules = OnCallSchedule.objects.exclude(
+ public_primary_key__in=(DEMO_SCHEDULE_ID_CALENDAR, DEMO_SCHEDULE_ID_ICAL)
+ )
+
+ for schedule in schedules:
+ check_gaps_in_schedule.apply_async((schedule.pk,))
+
+ task_logger.info("Finish start_check_gaps_in_schedule")
+
+
+@shared_dedicated_queue_retry_task()
+def check_gaps_in_schedule(schedule_pk):
+ OnCallSchedule = apps.get_model("schedules", "OnCallSchedule")
+
+ task_logger.info(f"Start check_gaps_in_schedule {schedule_pk}")
+
+ try:
+ schedule = OnCallSchedule.objects.get(
+ pk=schedule_pk,
+ )
+ except OnCallSchedule.DoesNotExist:
+ task_logger.info(f"Tried to check_gaps_in_schedule for non-existing schedule {schedule_pk}")
+ return
+
+ schedule.check_gaps_for_next_week()
+ task_logger.info(f"Finish check_gaps_in_schedule {schedule_pk}")
+
+
+@shared_dedicated_queue_retry_task()
+def start_notify_about_gaps_in_schedule():
+ OnCallSchedule = apps.get_model("schedules", "OnCallSchedule")
+
+ task_logger.info("Start start_notify_about_gaps_in_schedule")
+
+ today = timezone.now().date()
+ week_ago = today - timezone.timedelta(days=7)
+ schedules = OnCallSchedule.objects.filter(
+ gaps_report_sent_at__lte=week_ago,
+ channel__isnull=False,
+ ).exclude(public_primary_key__in=(DEMO_SCHEDULE_ID_CALENDAR, DEMO_SCHEDULE_ID_ICAL))
+
+ for schedule in schedules:
+ notify_about_gaps_in_schedule.apply_async((schedule.pk,))
+
+ task_logger.info("Finish start_notify_about_gaps_in_schedule")
+
+
+@shared_dedicated_queue_retry_task()
+def notify_about_gaps_in_schedule(schedule_pk):
+ OnCallSchedule = apps.get_model("schedules", "OnCallSchedule")
+
+ task_logger.info(f"Start notify_about_gaps_in_schedule {schedule_pk}")
+
+ cache_key = get_cache_key_notify_about_gaps_in_schedule(schedule_pk)
+ cached_task_id = cache.get(cache_key)
+ current_task_id = notify_about_gaps_in_schedule.request.id
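+ # Debounce: proceed only if this task is the most recently scheduled one for
+ # this schedule, or if it was triggered directly (no cached task id).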
+ if current_task_id != cached_task_id and cached_task_id is not None:
+ return
+
+ try:
+ schedule = OnCallSchedule.objects.get(pk=schedule_pk, channel__isnull=False)
+ except OnCallSchedule.DoesNotExist:
+ task_logger.info(f"Tried to notify_about_gaps_in_schedule for non-existing schedule {schedule_pk}")
+ return
+
+ today = timezone.now().date()
+ gaps = list_of_gaps_in_schedule(schedule, today, today + timezone.timedelta(days=7))
+ schedule.gaps_report_sent_at = today
+
+ if gaps:
+ schedule.has_gaps = True
+ text = f"There are time periods that are unassigned in *{schedule.name}* on-call schedule.\n"
+ for idx, gap in enumerate(gaps):
+ if gap.start:
+ start_verbal = format_datetime_to_slack(int(gap.start.astimezone(pytz.UTC).timestamp()))
+ else:
+ start_verbal = "..."
+ if gap.end:
+ end_verbal = format_datetime_to_slack(int(gap.end.astimezone(pytz.UTC).timestamp()))
+ else:
+ end_verbal = "..."
+ text += f"From {start_verbal} to {end_verbal} (your TZ)\n"
+ if idx != len(gaps) - 1:
+ text += "\n\n"
+ post_message_to_channel(schedule.organization, schedule.channel, text)
+ else:
+ schedule.has_gaps = False
+ schedule.save(update_fields=["gaps_report_sent_at", "has_gaps"])
+ task_logger.info(f"Finish notify_about_gaps_in_schedule {schedule_pk}")
+
+
+def get_cache_key_notify_about_gaps_in_schedule(schedule_pk):
+ CACHE_KEY_PREFIX = "notify_about_gaps_in_schedule"
+ return f"{CACHE_KEY_PREFIX}_{schedule_pk}"
+
+
+@shared_dedicated_queue_retry_task()
+def schedule_notify_about_gaps_in_schedule(schedule_pk):
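+ # Debounce helper: start the notification task after a short delay and cache
+ # its id; only the most recently scheduled task passes the id check in
+ # notify_about_gaps_in_schedule above.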
+ CACHE_LIFETIME = 600
+ START_TASK_DELAY = 60
+ task = notify_about_gaps_in_schedule.apply_async(args=[schedule_pk], countdown=START_TASK_DELAY)
+ cache_key = get_cache_key_notify_about_gaps_in_schedule(schedule_pk)
+ cache.set(cache_key, task.id, timeout=CACHE_LIFETIME)
diff --git a/engine/apps/schedules/tasks/refresh_ical_files.py b/engine/apps/schedules/tasks/refresh_ical_files.py
new file mode 100644
index 0000000000..083e198f6b
--- /dev/null
+++ b/engine/apps/schedules/tasks/refresh_ical_files.py
@@ -0,0 +1,63 @@
+from celery.utils.log import get_task_logger
+from django.apps import apps
+
+from apps.alerts.tasks import notify_ical_schedule_shift
+from apps.public_api.constants import DEMO_SCHEDULE_ID_CALENDAR, DEMO_SCHEDULE_ID_ICAL
+from apps.schedules.ical_utils import is_icals_equal
+from apps.schedules.tasks import notify_about_empty_shifts_in_schedule, notify_about_gaps_in_schedule
+from apps.slack.tasks import start_update_slack_user_group_for_schedules
+from common.custom_celery_tasks import shared_dedicated_queue_retry_task
+
+task_logger = get_task_logger(__name__)
+
+
+@shared_dedicated_queue_retry_task()
+def start_refresh_ical_files():
+ OnCallSchedule = apps.get_model("schedules", "OnCallSchedule")
+
+ task_logger.info("Start refresh ical files")
+
+ schedules = OnCallSchedule.objects.all().exclude(
+ public_primary_key__in=(DEMO_SCHEDULE_ID_CALENDAR, DEMO_SCHEDULE_ID_ICAL)
+ )
+ for schedule in schedules:
+ refresh_ical_file.apply_async((schedule.pk,))
+
+ # Update Slack user groups with a delay to make sure all the schedules are refreshed
+ start_update_slack_user_group_for_schedules.apply_async(countdown=30)
+
+
+@shared_dedicated_queue_retry_task()
+def refresh_ical_file(schedule_pk):
+ OnCallSchedule = apps.get_model("schedules", "OnCallSchedule")
+
+ task_logger.info(f"Refresh ical files for schedule {schedule_pk}")
+
+ try:
+ schedule = OnCallSchedule.objects.get(pk=schedule_pk)
+ except OnCallSchedule.DoesNotExist:
+ task_logger.info(f"Tried to refresh non-existing schedule {schedule_pk}")
+ return
+
+ schedule.refresh_ical_file()
+ if schedule.channel is not None:
+ notify_ical_schedule_shift.apply_async((schedule.pk,))
+
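+ # Notify about empty shifts and gaps only if the calendar content actually
+ # changed: compare the freshly cached ical files against the previous copies.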
+ run_task_primary = False
+ if schedule.cached_ical_file_primary is not None:
+ if schedule.prev_ical_file_primary is None:
+ run_task_primary = True
+ else:
+ run_task_primary = not is_icals_equal(schedule.cached_ical_file_primary, schedule.prev_ical_file_primary)
+ run_task_overrides = False
+ if schedule.cached_ical_file_overrides is not None:
+ if schedule.prev_ical_file_overrides is None:
+ run_task_overrides = True
+ else:
+ run_task_overrides = not is_icals_equal(
+ schedule.cached_ical_file_overrides, schedule.prev_ical_file_overrides
+ )
+ run_task = run_task_primary or run_task_overrides
+ if run_task:
+ notify_about_empty_shifts_in_schedule.apply_async((schedule_pk,))
+ notify_about_gaps_in_schedule.apply_async((schedule_pk,))
diff --git a/engine/apps/schedules/tests/__init__.py b/engine/apps/schedules/tests/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/engine/apps/schedules/tests/calendars/calendar_with_all_day_event.ics b/engine/apps/schedules/tests/calendars/calendar_with_all_day_event.ics
new file mode 100644
index 0000000000..4d0e562c12
--- /dev/null
+++ b/engine/apps/schedules/tests/calendars/calendar_with_all_day_event.ics
@@ -0,0 +1,62 @@
+BEGIN:VCALENDAR
+PRODID:-//Google Inc//Google Calendar 70.9054//EN
+VERSION:2.0
+CALSCALE:GREGORIAN
+METHOD:PUBLISH
+X-WR-CALNAME:t
+X-WR-TIMEZONE:Asia/Yekaterinburg
+BEGIN:VTIMEZONE
+TZID:Asia/Yekaterinburg
+X-LIC-LOCATION:Asia/Yekaterinburg
+BEGIN:STANDARD
+TZOFFSETFROM:+0500
+TZOFFSETTO:+0500
+TZNAME:+05
+DTSTART:19700101T000000
+END:STANDARD
+END:VTIMEZONE
+BEGIN:VEVENT
+DTSTART;VALUE=DATE:20210127
+DTEND;VALUE=DATE:20210128
+DTSTAMP:20210127T154139Z
+UID:7q00jpu4hdlr9e3j4fftbv7kt7@google.com
+CREATED:20210127T143802Z
+DESCRIPTION:
+LAST-MODIFIED:20210127T143802Z
+LOCATION:
+SEQUENCE:0
+STATUS:CONFIRMED
+SUMMARY:@Alex
+TRANSP:TRANSPARENT
+END:VEVENT
+BEGIN:VEVENT
+DTSTART;TZID=Asia/Yekaterinburg:20210127T130000
+DTEND;TZID=Asia/Yekaterinburg:20210127T220000
+DTSTAMP:20210127T154139Z
+UID:0i0af8p6p8vfampe3r1vkog0jg@google.com
+RECURRENCE-ID;TZID=Asia/Yekaterinburg:20210127T130000
+CREATED:20210127T143553Z
+DESCRIPTION:
+LAST-MODIFIED:20210127T143705Z
+LOCATION:
+SEQUENCE:0
+STATUS:CONFIRMED
+SUMMARY:@Bob
+TRANSP:OPAQUE
+END:VEVENT
+BEGIN:VEVENT
+DTSTART;TZID=Asia/Yekaterinburg:20210124T130000
+DTEND;TZID=Asia/Yekaterinburg:20210124T220000
+RRULE:FREQ=DAILY
+DTSTAMP:20210127T154139Z
+UID:0i0af8p6p8vfampe3r1vkog0jg@google.com
+CREATED:20210127T143553Z
+DESCRIPTION:
+LAST-MODIFIED:20210127T143553Z
+LOCATION:
+SEQUENCE:0
+STATUS:CONFIRMED
+SUMMARY:@Bernard Desruisseaux
+TRANSP:OPAQUE
+END:VEVENT
+END:VCALENDAR
diff --git a/engine/apps/schedules/tests/calendars/calendar_with_edited_recurring_events.ics b/engine/apps/schedules/tests/calendars/calendar_with_edited_recurring_events.ics
new file mode 100644
index 0000000000..db9e2246f8
--- /dev/null
+++ b/engine/apps/schedules/tests/calendars/calendar_with_edited_recurring_events.ics
@@ -0,0 +1,48 @@
+BEGIN:VCALENDAR
+PRODID:-//Google Inc//Google Calendar 70.9054//EN
+VERSION:2.0
+CALSCALE:GREGORIAN
+METHOD:PUBLISH
+X-WR-CALNAME:t
+X-WR-TIMEZONE:Asia/Yekaterinburg
+BEGIN:VTIMEZONE
+TZID:Asia/Yekaterinburg
+X-LIC-LOCATION:Asia/Yekaterinburg
+BEGIN:STANDARD
+TZOFFSETFROM:+0500
+TZOFFSETTO:+0500
+TZNAME:+05
+DTSTART:19700101T000000
+END:STANDARD
+END:VTIMEZONE
+BEGIN:VEVENT
+DTSTART;TZID=Asia/Yekaterinburg:20210127T130000
+DTEND;TZID=Asia/Yekaterinburg:20210127T220000
+DTSTAMP:20210127T143711Z
+UID:0i0af8p6p8vfampe3r1vkog0jg@google.com
+RECURRENCE-ID;TZID=Asia/Yekaterinburg:20210127T130000
+CREATED:20210127T143553Z
+DESCRIPTION:
+LAST-MODIFIED:20210127T143705Z
+LOCATION:
+SEQUENCE:0
+STATUS:CONFIRMED
+SUMMARY:@Bob
+TRANSP:OPAQUE
+END:VEVENT
+BEGIN:VEVENT
+DTSTART;TZID=Asia/Yekaterinburg:20210124T130000
+DTEND;TZID=Asia/Yekaterinburg:20210124T220000
+RRULE:FREQ=DAILY
+DTSTAMP:20210127T143711Z
+UID:0i0af8p6p8vfampe3r1vkog0jg@google.com
+CREATED:20210127T143553Z
+DESCRIPTION:
+LAST-MODIFIED:20210127T143553Z
+LOCATION:
+SEQUENCE:0
+STATUS:CONFIRMED
+SUMMARY:@Bernard Desruisseaux
+TRANSP:OPAQUE
+END:VEVENT
+END:VCALENDAR
diff --git a/engine/apps/schedules/tests/calendars/calendar_with_recurring_event.ics b/engine/apps/schedules/tests/calendars/calendar_with_recurring_event.ics
new file mode 100644
index 0000000000..56c78ced5b
--- /dev/null
+++ b/engine/apps/schedules/tests/calendars/calendar_with_recurring_event.ics
@@ -0,0 +1,33 @@
+BEGIN:VCALENDAR
+PRODID:-//Google Inc//Google Calendar 70.9054//EN
+VERSION:2.0
+CALSCALE:GREGORIAN
+METHOD:PUBLISH
+X-WR-CALNAME:t
+X-WR-TIMEZONE:Asia/Yekaterinburg
+BEGIN:VTIMEZONE
+TZID:Asia/Yekaterinburg
+X-LIC-LOCATION:Asia/Yekaterinburg
+BEGIN:STANDARD
+TZOFFSETFROM:+0500
+TZOFFSETTO:+0500
+TZNAME:+05
+DTSTART:19700101T000000
+END:STANDARD
+END:VTIMEZONE
+BEGIN:VEVENT
+DTSTART;TZID=Asia/Yekaterinburg:20210124T130000
+DTEND;TZID=Asia/Yekaterinburg:20210124T220000
+RRULE:FREQ=DAILY
+DTSTAMP:20210127T143634Z
+UID:0i0af8p6p8vfampe3r1vkog0jg@google.com
+CREATED:20210127T143553Z
+DESCRIPTION:
+LAST-MODIFIED:20210127T143553Z
+LOCATION:
+SEQUENCE:0
+STATUS:CONFIRMED
+SUMMARY:@Bernard Desruisseaux
+TRANSP:OPAQUE
+END:VEVENT
+END:VCALENDAR
diff --git a/engine/apps/schedules/tests/conftest.py b/engine/apps/schedules/tests/conftest.py
new file mode 100644
index 0000000000..e436328fa5
--- /dev/null
+++ b/engine/apps/schedules/tests/conftest.py
@@ -0,0 +1,17 @@
+import os
+
+import pytest
+from icalendar import Calendar
+
+CALENDARS_FOLDER = os.path.join(os.path.dirname(os.path.abspath(__file__)), "calendars")
+
+
+@pytest.fixture()
+def get_ical():
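+ # Returns a loader that reads an .ics fixture from the ./calendars folder
+ # and parses it with icalendar.Calendar.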
+ def _get_ical(calendar_name):
+ path_to_calendar = os.path.join(CALENDARS_FOLDER, calendar_name)
+ with open(path_to_calendar, "rb") as file:
+ content = file.read()
+ return Calendar.from_ical(content)
+
+ return _get_ical
diff --git a/engine/apps/schedules/tests/factories.py b/engine/apps/schedules/tests/factories.py
new file mode 100644
index 0000000000..50a7e393f1
--- /dev/null
+++ b/engine/apps/schedules/tests/factories.py
@@ -0,0 +1,32 @@
+import factory
+
+from apps.schedules.models import CustomOnCallShift, OnCallScheduleCalendar, OnCallScheduleICal
+from common.utils import UniqueFaker
+
+
+class OnCallScheduleFactory(factory.DjangoModelFactory):
+ name = UniqueFaker("sentence", nb_words=2)
+
+ @classmethod
+ def get_factory_for_class(cls, klass):
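+ # Find the concrete factory whose model matches the given schedule class.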
+ factory_classes = OnCallScheduleFactory.__subclasses__()
+ for factory_class in factory_classes:
+ if issubclass(klass, factory_class._meta.model):
+ return factory_class
+
+
+class OnCallScheduleICalFactory(OnCallScheduleFactory):
+ class Meta:
+ model = OnCallScheduleICal
+
+
+class OnCallScheduleCalendarFactory(OnCallScheduleFactory):
+ class Meta:
+ model = OnCallScheduleCalendar
+
+
+class CustomOnCallShiftFactory(factory.DjangoModelFactory):
+ name = UniqueFaker("sentence", nb_words=2)
+
+ class Meta:
+ model = CustomOnCallShift
diff --git a/engine/apps/schedules/tests/test_amixr_users_in_ical.py b/engine/apps/schedules/tests/test_amixr_users_in_ical.py
new file mode 100644
index 0000000000..9c505ea782
--- /dev/null
+++ b/engine/apps/schedules/tests/test_amixr_users_in_ical.py
@@ -0,0 +1,103 @@
+import pytest
+
+from apps.schedules.ical_utils import users_in_ical
+
+
+@pytest.mark.skip(reason="For now ical searching works only by username")
+@pytest.mark.django_db
+def test_search_user_by_profile_display_name(
+ make_organization_with_slack_team_identity,
+ make_user_with_slack_user_identity,
+):
+ organization, slack_team_identity = make_organization_with_slack_team_identity()
+ make_user_with_slack_user_identity(slack_team_identity, profile_display_name="Alex")
+
+ assert len(users_in_ical(["Alex"], organization)) == 1
+
+
+@pytest.mark.django_db
+def test_search_user_by_username(
+ make_organization,
+ make_user,
+):
+ organization_1 = make_organization()
+ organization_2 = make_organization()
+ test_username = "Alex"
+ make_user(organization=organization_1, username=test_username)
+
+ assert len(users_in_ical([test_username], organization_1)) == 1
+ assert len(users_in_ical([test_username], organization_2)) == 0
+
+
+@pytest.mark.skip(reason="For now ical searching works only by username")
+@pytest.mark.django_db
+def test_search_by_slack_user_identity_for_different_organizations(
+ make_organization_with_slack_team_identity,
+ make_user_with_slack_user_identity,
+ make_organization,
+):
+ organization_1, slack_team_identity_1 = make_organization_with_slack_team_identity()
+ organization_2 = make_organization(slack_team_identity=slack_team_identity_1)
+ test_profile_display_name = "Alex"
+ make_user_with_slack_user_identity(
+ slack_team_identity_1, organization=organization_1, profile_display_name=test_profile_display_name
+ )
+
+ assert len(users_in_ical([test_profile_display_name], organization_1)) == 1
+ assert len(users_in_ical([test_profile_display_name], organization_2)) == 0
+
+
+@pytest.mark.skip(reason="For now ical searching works only by username")
+@pytest.mark.django_db
+def test_search_with_deleted_slack_user_identity_in_another_team(
+ make_organization_with_slack_team_identity,
+ make_user_with_slack_user_identity,
+):
+ organization_1, slack_team_identity_1 = make_organization_with_slack_team_identity()
+ organization_2, slack_team_identity_2 = make_organization_with_slack_team_identity()
+ make_user_with_slack_user_identity(
+ slack_team_identity_1, organization=organization_1, profile_display_name="Alex", deleted=False
+ )
+ make_user_with_slack_user_identity(
+ slack_team_identity_2, organization=organization_2, profile_display_name="Bob", deleted=True
+ )
+
+ assert len(users_in_ical(["Alex"], organization_1)) == 1
+
+
+@pytest.mark.skip(reason="For now ical searching works only by username")
+@pytest.mark.django_db
+def test_search_with_deleted_slack_user_identity(
+ make_team_and_user,
+ make_team_for_user,
+ make_slack_team_identity_for_team,
+ make_user_with_slack_user_identity,
+):
+ amixr_team_1, amixr_user = make_team_and_user()
+ slack_team_identity_1 = make_slack_team_identity_for_team(amixr_team_1)
+ make_user_with_slack_user_identity(amixr_user, slack_team_identity_1, profile_display_name="Alex", deleted=True)
+
+ assert len(users_in_ical(["Alex"], amixr_team_1)) == 0
+
+
+@pytest.mark.skip(reason="For now ical searching works only by username")
+@pytest.mark.django_db
+def test_search_users_with_and_without_slack_user_identity(
+ make_team,
+ make_user,
+ make_slack_team_identity_for_team,
+ make_user_with_slack_user_identity,
+):
+ amixr_team = make_team()
+ slack_team_identity = make_slack_team_identity_for_team(amixr_team)
+
+ amixr_user_with_sui = make_user()
+ # make_role(amixr_user_with_sui, amixr_team, username="Alex")
+ make_user_with_slack_user_identity(amixr_user_with_sui, slack_team_identity, profile_display_name="Alex")
+
+ # amixr_user_without_sui = make_user()
+ # make_role(amixr_user_without_sui, amixr_team, username="Bob")
+
+ assert len(users_in_ical(["Bob"], amixr_team)) == 1
+ assert len(users_in_ical(["Alex"], amixr_team)) == 1
+ assert len(users_in_ical(["Alex", "Bob"], amixr_team)) == 2
diff --git a/engine/apps/schedules/tests/test_custom_on_call_shift.py b/engine/apps/schedules/tests/test_custom_on_call_shift.py
new file mode 100644
index 0000000000..0a6468a9b8
--- /dev/null
+++ b/engine/apps/schedules/tests/test_custom_on_call_shift.py
@@ -0,0 +1,201 @@
+import pytest
+from django.utils import timezone
+
+from apps.schedules.ical_utils import list_users_to_notify_from_ical
+from apps.schedules.models import CustomOnCallShift, OnCallSchedule, OnCallScheduleCalendar
+
+
+@pytest.mark.django_db
+def test_get_on_call_users_from_single_event(make_organization_and_user, make_on_call_shift, make_schedule):
+ organization, user = make_organization_and_user()
+
+ schedule = make_schedule(organization, schedule_class=OnCallScheduleCalendar)
+ date = timezone.now().replace(tzinfo=None, microsecond=0)
+
+ data = {
+ "priority_level": 1,
+ "start": date,
+ "duration": timezone.timedelta(seconds=10800),
+ }
+
+ on_call_shift = make_on_call_shift(
+ organization=organization, shift_type=CustomOnCallShift.TYPE_SINGLE_EVENT, **data
+ )
+ on_call_shift.users.add(user)
+
+ schedule.custom_on_call_shifts.add(on_call_shift)
+
+ # user is on-call
+ date = date + timezone.timedelta(minutes=5)
+ users_on_call = list_users_to_notify_from_ical(schedule, date)
+ assert len(users_on_call) == 1
+ assert user in users_on_call
+
+
+@pytest.mark.django_db
+def test_get_on_call_users_from_recurrent_event(make_organization_and_user, make_on_call_shift, make_schedule):
+ organization, user = make_organization_and_user()
+
+ schedule = make_schedule(organization, schedule_class=OnCallScheduleCalendar)
+ date = timezone.now().replace(tzinfo=None, microsecond=0)
+
+ data = {
+ "priority_level": 1,
+ "start": date,
+ "duration": timezone.timedelta(seconds=10800),
+ "frequency": CustomOnCallShift.FREQUENCY_DAILY,
+ "interval": 2,
+ }
+
+ on_call_shift = make_on_call_shift(
+ organization=organization, shift_type=CustomOnCallShift.TYPE_RECURRENT_EVENT, **data
+ )
+ on_call_shift.users.add(user)
+
+ schedule.custom_on_call_shifts.add(on_call_shift)
+
+ # user is on-call
+ date = date + timezone.timedelta(minutes=5)
+ users_on_call = list_users_to_notify_from_ical(schedule, date)
+ assert len(users_on_call) == 1
+ assert user in users_on_call
+
+ # user is not on-call according to event recurrence rules (interval = 2)
+ date = date + timezone.timedelta(days=1)
+ users_on_call = list_users_to_notify_from_ical(schedule, date)
+ assert len(users_on_call) == 0
+
+ # user is on-call again
+ date = date + timezone.timedelta(days=1)
+ users_on_call = list_users_to_notify_from_ical(schedule, date)
+ assert len(users_on_call) == 1
+ assert user in users_on_call
+
+
+@pytest.mark.django_db
+def test_get_on_call_users_from_rolling_users_event(
+ make_organization_and_user, make_user_for_organization, make_on_call_shift, make_schedule
+):
+ organization, user_1 = make_organization_and_user()
+ user_2 = make_user_for_organization(organization)
+
+ schedule = make_schedule(organization, schedule_class=OnCallScheduleCalendar)
+ now = timezone.now().replace(tzinfo=None, microsecond=0)
+
+ data = {
+ "priority_level": 1,
+ "start": now,
+ "duration": timezone.timedelta(seconds=10800),
+ "frequency": CustomOnCallShift.FREQUENCY_DAILY,
+ "interval": 2,
+ }
+ rolling_users = [[user_1], [user_2]]
+ on_call_shift = make_on_call_shift(
+ organization=organization, shift_type=CustomOnCallShift.TYPE_ROLLING_USERS_EVENT, **data
+ )
+ on_call_shift.add_rolling_users(rolling_users)
+ schedule.custom_on_call_shifts.add(on_call_shift)
+
+ date = now + timezone.timedelta(minutes=5)
+
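+ # With daily frequency, interval=2 and two rolling user groups, shifts occur
+ # every other day, alternating groups: user_1 on days 0 and 4, user_2 on
+ # days 2 and 6, and nobody in between.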
+ user_1_on_call_dates = [date, date + timezone.timedelta(days=4)]
+ user_2_on_call_dates = [date + timezone.timedelta(days=2), date + timezone.timedelta(days=6)]
+ nobody_on_call_dates = [
+ date + timezone.timedelta(days=1),
+ date + timezone.timedelta(days=3),
+ date + timezone.timedelta(days=5),
+ ]
+
+ for date in user_1_on_call_dates:
+ users_on_call = list_users_to_notify_from_ical(schedule, date)
+ assert len(users_on_call) == 1
+ assert user_1 in users_on_call
+
+ for date in user_2_on_call_dates:
+ users_on_call = list_users_to_notify_from_ical(schedule, date)
+ assert len(users_on_call) == 1
+ assert user_2 in users_on_call
+
+ for date in nobody_on_call_dates:
+ users_on_call = list_users_to_notify_from_ical(schedule, date)
+ assert len(users_on_call) == 0
+
+
+@pytest.mark.django_db
+def test_get_oncall_users_for_empty_schedule(
+ make_organization,
+ make_schedule,
+):
+ organization = make_organization()
+
+ schedule = make_schedule(organization, schedule_class=OnCallScheduleCalendar)
+ schedules = OnCallSchedule.objects.filter(pk=schedule.pk)
+
+ assert schedules.get_oncall_users() == []
+
+
+@pytest.mark.django_db
+def test_get_oncall_users_for_multiple_schedules(
+ make_organization,
+ make_user_for_organization,
+ make_on_call_shift,
+ make_schedule,
+):
+ organization = make_organization()
+
+ user_1 = make_user_for_organization(organization)
+ user_2 = make_user_for_organization(organization)
+ user_3 = make_user_for_organization(organization)
+
+ schedule_1 = make_schedule(organization, schedule_class=OnCallScheduleCalendar)
+ schedule_2 = make_schedule(organization, schedule_class=OnCallScheduleCalendar)
+
+ now = timezone.now().replace(tzinfo=None, microsecond=0)
+
+ on_call_shift_1 = make_on_call_shift(
+ organization=organization,
+ shift_type=CustomOnCallShift.TYPE_SINGLE_EVENT,
+ priority_level=1,
+ start=now,
+ duration=timezone.timedelta(minutes=30),
+ )
+
+ on_call_shift_2 = make_on_call_shift(
+ organization=organization,
+ shift_type=CustomOnCallShift.TYPE_SINGLE_EVENT,
+ priority_level=1,
+ start=now,
+ duration=timezone.timedelta(minutes=10),
+ )
+
+ on_call_shift_3 = make_on_call_shift(
+ organization=organization,
+ shift_type=CustomOnCallShift.TYPE_SINGLE_EVENT,
+ priority_level=1,
+ start=now + timezone.timedelta(minutes=10),
+ duration=timezone.timedelta(minutes=30),
+ )
+
+ on_call_shift_1.users.add(user_1)
+ on_call_shift_1.users.add(user_2)
+
+ on_call_shift_2.users.add(user_2)
+
+ on_call_shift_3.users.add(user_3)
+
+ schedule_1.custom_on_call_shifts.add(on_call_shift_1)
+
+ schedule_2.custom_on_call_shifts.add(on_call_shift_2)
+ schedule_2.custom_on_call_shifts.add(on_call_shift_3)
+
+ schedules = OnCallSchedule.objects.filter(pk__in=[schedule_1.pk, schedule_2.pk])
+
+ expected = set(schedules.get_oncall_users(events_datetime=now + timezone.timedelta(seconds=1)))
+ assert expected == {user_1, user_2}
+
+ expected = set(schedules.get_oncall_users(events_datetime=now + timezone.timedelta(minutes=10, seconds=1)))
+ assert expected == {user_1, user_2, user_3}
+
+ assert schedules.get_oncall_users(events_datetime=now + timezone.timedelta(minutes=30, seconds=1)) == [user_3]
+
+ assert schedules.get_oncall_users(events_datetime=now + timezone.timedelta(minutes=40, seconds=1)) == []
diff --git a/engine/apps/schedules/tests/test_ical_proxy.py b/engine/apps/schedules/tests/test_ical_proxy.py
new file mode 100644
index 0000000000..4dc0620807
--- /dev/null
+++ b/engine/apps/schedules/tests/test_ical_proxy.py
@@ -0,0 +1,52 @@
+from datetime import datetime
+
+import pytz
+from django.utils import timezone
+
+from apps.schedules.ical_events import ical_events
+
+
+def test_recurring_ical_events(get_ical):
+ calendar = get_ical("calendar_with_recurring_event.ics")
+ day_to_check_iso = "2021-01-27T15:27:14.448059+00:00"
+ day_to_check = timezone.datetime.fromisoformat(day_to_check_iso)
+ events = ical_events.get_events_from_ical_between(
+ calendar,
+ day_to_check - timezone.timedelta(days=1),
+ day_to_check + timezone.timedelta(days=1),
+ )
+ assert len(events) == 3
+ assert events[0]["SUMMARY"] == "@Bernard Desruisseaux"
+ assert events[1]["SUMMARY"] == "@Bernard Desruisseaux"
+ assert events[2]["SUMMARY"] == "@Bernard Desruisseaux"
+
+
+def test_recurring_ical_events_with_edited_event(get_ical):
+ calendar = get_ical("calendar_with_edited_recurring_events.ics")
+ day_to_check_iso = "2021-01-27T15:27:14.448059+00:00"
+ day_to_check = timezone.datetime.fromisoformat(day_to_check_iso)
+ events = ical_events.get_events_from_ical_between(
+ calendar,
+ day_to_check - timezone.timedelta(days=1),
+ day_to_check + timezone.timedelta(days=1),
+ )
+ assert len(events) == 3
+ assert events[0]["SUMMARY"] == "@Bob"
+ assert events[1]["SUMMARY"] == "@Bernard Desruisseaux"
+ assert events[2]["SUMMARY"] == "@Bernard Desruisseaux"
+
+
+def test_recurring_ical_events_with_all_day_event(get_ical):
+ calendar = get_ical("calendar_with_all_day_event.ics")
+ day_to_check_iso = "2021-01-27T15:27:14.448059+00:00"
+ parsed_iso_day_to_check = datetime.fromisoformat(day_to_check_iso).replace(tzinfo=pytz.UTC)
+ events = ical_events.get_events_from_ical_between(
+ calendar,
+ parsed_iso_day_to_check - timezone.timedelta(days=1),
+ parsed_iso_day_to_check + timezone.timedelta(days=1),
+ )
+ assert len(events) == 4
+ assert events[0]["SUMMARY"] == "@Alex"
+ assert events[1]["SUMMARY"] == "@Bob"
+ assert events[2]["SUMMARY"] == "@Bernard Desruisseaux"
+ assert events[3]["SUMMARY"] == "@Bernard Desruisseaux"
diff --git a/engine/apps/sendgridapp/__init__.py b/engine/apps/sendgridapp/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/engine/apps/sendgridapp/constants.py b/engine/apps/sendgridapp/constants.py
new file mode 100644
index 0000000000..b6d8479331
--- /dev/null
+++ b/engine/apps/sendgridapp/constants.py
@@ -0,0 +1,49 @@
+class SendgridEmailMessageStatuses(object):
+ """
+ https://sendgrid.com/docs/for-developers/tracking-events/event/#delivery-events
+ """
+
+ # Delivery events
+ ACCEPTED = 10
+ PROCESSED = 20
+ DEFERRED = 30
+ DELIVERED = 40
+ DROPPED = 50
+ BOUNCE = 60 # "event": "bounce", "type": "bounce"
+ BLOCKED = 70 # "event": "bounce", "type": "blocked"
+
+ # Engagement events
+ OPEN = 80
+ CLICK = 90
+ UNSUBSCRIBE = 100
+ SPAMREPORT = 110
+ # Group Unsubscribe - ?
+ # Group Resubscribe - ?
+
+ CHOICES = (
+ (ACCEPTED, "accepted"),
+ (PROCESSED, "processed"),
+ (DEFERRED, "deferred"),
+ (DELIVERED, "delivered"),
+ (DROPPED, "dropped"),
+ (BOUNCE, "bounce"),
+ (BLOCKED, "blocked"),
+ (OPEN, "open"),
+ (CLICK, "click"),
+ (UNSUBSCRIBE, "unsubscribe"),
+ (SPAMREPORT, "spamreport"),
+ )
+
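+ # Maps Sendgrid webhook event names to the integer statuses defined above.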
+ DETERMINANT = {
+ "accepted": ACCEPTED,
+ "processed": PROCESSED,
+ "deferred": DEFERRED,
+ "delivered": DELIVERED,
+ "dropped": DROPPED,
+ "bounce": BOUNCE,
+ "blocked": BLOCKED,
+ "open": OPEN,
+ "click": CLICK,
+ "unsubscribe": UNSUBSCRIBE,
+ "spamreport": SPAMREPORT,
+ }
diff --git a/engine/apps/sendgridapp/migrations/__init__.py b/engine/apps/sendgridapp/migrations/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/engine/apps/sendgridapp/models.py b/engine/apps/sendgridapp/models.py
new file mode 100644
index 0000000000..cd717165a7
--- /dev/null
+++ b/engine/apps/sendgridapp/models.py
@@ -0,0 +1,185 @@
+import logging
+import uuid
+
+from django.apps import apps
+from django.db import models
+from python_http_client.exceptions import BadRequestsError, ForbiddenError, UnauthorizedError
+from sendgrid import SendGridAPIClient
+from sendgrid.helpers.mail import CustomArg, Mail
+
+from apps.alerts.incident_appearance.renderers.email_renderer import AlertGroupEmailRenderer
+from apps.alerts.signals import user_notification_action_triggered_signal
+from apps.base.utils import live_settings
+from apps.sendgridapp.constants import SendgridEmailMessageStatuses
+
+logger = logging.getLogger(__name__)
+
+
+class EmailMessageManager(models.Manager):
+ def update_status(self, message_uuid, message_status):
+ """The function checks existence of EmailMessage
+ instance according to message_uuid and updates status on
+ message_status
+
+ Args:
+ message_uuid (str): uuid of Email message
+ message_status (str): new status
+
+ Returns:
+
+ """
+ UserNotificationPolicyLogRecord = apps.get_model("base", "UserNotificationPolicyLogRecord")
+
+ if message_uuid and message_status:
+ email_message_qs = self.filter(message_uuid=message_uuid)
+ status = SendgridEmailMessageStatuses.DETERMINANT.get(message_status)
+
+ if email_message_qs.exists() and status:
+ email_message_qs.update(status=status)
+
+ email_message = email_message_qs.first()
+ log_record = None
+
+ if status == SendgridEmailMessageStatuses.DELIVERED:
+ log_record = UserNotificationPolicyLogRecord(
+ author=email_message.receiver,
+ type=UserNotificationPolicyLogRecord.TYPE_PERSONAL_NOTIFICATION_SUCCESS,
+ notification_policy=email_message.notification_policy,
+ alert_group=email_message.represents_alert_group,
+ notification_step=email_message.notification_policy.step
+ if email_message.notification_policy
+ else None,
+ notification_channel=email_message.notification_policy.notify_by
+ if email_message.notification_policy
+ else None,
+ )
+ elif status in [
+ SendgridEmailMessageStatuses.BOUNCE,
+ SendgridEmailMessageStatuses.BLOCKED,
+ SendgridEmailMessageStatuses.DROPPED,
+ ]:
+ log_record = UserNotificationPolicyLogRecord(
+ author=email_message.receiver,
+ type=UserNotificationPolicyLogRecord.TYPE_PERSONAL_NOTIFICATION_FAILED,
+ notification_policy=email_message.notification_policy,
+ alert_group=email_message.represents_alert_group,
+ notification_error_code=email_message.get_error_code_by_sendgrid_status(status),
+ notification_step=email_message.notification_policy.step
+ if email_message.notification_policy
+ else None,
+ notification_channel=email_message.notification_policy.notify_by
+ if email_message.notification_policy
+ else None,
+ )
+ if log_record is not None:
+ log_record.save()
+ user_notification_action_triggered_signal.send(
+ sender=EmailMessage.objects.update_status, log_record=log_record
+ )
+
+
+class EmailMessage(models.Model):
+ objects = EmailMessageManager()
+
+ message_uuid = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
+
+ exceeded_limit = models.BooleanField(null=True, default=None)
+ represents_alert = models.ForeignKey("alerts.Alert", on_delete=models.SET_NULL, null=True, default=None)
+ represents_alert_group = models.ForeignKey("alerts.AlertGroup", on_delete=models.SET_NULL, null=True, default=None)
+ notification_policy = models.ForeignKey(
+ "base.UserNotificationPolicy", on_delete=models.SET_NULL, null=True, default=None
+ )
+
+ receiver = models.ForeignKey("user_management.User", on_delete=models.PROTECT, null=True, default=None)
+
+ status = models.PositiveSmallIntegerField(blank=True, null=True, choices=SendgridEmailMessageStatuses.CHOICES)
+
+ created_at = models.DateTimeField(auto_now_add=True)
+
+ @staticmethod
+ def send_incident_mail(user, alert_group, notification_policy):
+ UserNotificationPolicyLogRecord = apps.get_model("base", "UserNotificationPolicyLogRecord")
+
+ log_record = None
+ alert = alert_group.alerts.first()
+
+ email_message = EmailMessage(
+ represents_alert_group=alert_group,
+ represents_alert=alert,
+ receiver=user,
+ notification_policy=notification_policy,
+ )
+ emails_left = alert_group.channel.organization.emails_left(user)
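+ # Organizations have a limited email quota; warn the recipient in the message
+ # body when fewer than 5 emails remain.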
+ if emails_left > 0:
+ email_message.exceeded_limit = False
+
+ limit_notification = False
+ if emails_left < 5:
+ limit_notification = True
+
+ subject, html_content = AlertGroupEmailRenderer(alert_group).render(limit_notification)
+
+ message = Mail(
+ from_email=live_settings.SENDGRID_FROM_EMAIL,
+ to_emails=user.email,
+ subject=subject,
+ html_content=html_content,
+ )
+ custom_arg = CustomArg("message_uuid", str(email_message.message_uuid))
+ message.add_custom_arg(custom_arg)
+
+ sendgrid_client = SendGridAPIClient(live_settings.SENDGRID_API_KEY)
+ try:
+ response = sendgrid_client.send(message)
+ sending_status = True
+ except (BadRequestsError, UnauthorizedError, ForbiddenError) as e:
+ logger.error(f"Error email sending: {e}")
+ sending_status = False
+ else:
+ if response.status_code == 202:
+ email_message.status = SendgridEmailMessageStatuses.ACCEPTED
+ email_message.save()
+ else:
+ logger.error(f"Error email sending: status code: {response.status_code}")
+ sending_status = False
+
+ if not sending_status:
+ log_record = UserNotificationPolicyLogRecord(
+ author=user,
+ type=UserNotificationPolicyLogRecord.TYPE_PERSONAL_NOTIFICATION_FAILED,
+ notification_policy=notification_policy,
+ alert_group=alert_group,
+ notification_error_code=UserNotificationPolicyLogRecord.ERROR_NOTIFICATION_NOT_ABLE_TO_SEND_MAIL,
+ notification_step=notification_policy.step if notification_policy else None,
+ notification_channel=notification_policy.notify_by if notification_policy else None,
+ )
+ else:
+ log_record = UserNotificationPolicyLogRecord(
+ author=user,
+ type=UserNotificationPolicyLogRecord.TYPE_PERSONAL_NOTIFICATION_FAILED,
+ notification_policy=notification_policy,
+ alert_group=alert_group,
+ notification_error_code=UserNotificationPolicyLogRecord.ERROR_NOTIFICATION_MAIL_LIMIT_EXCEEDED,
+ notification_step=notification_policy.step if notification_policy else None,
+ notification_channel=notification_policy.notify_by if notification_policy else None,
+ )
+ email_message.exceeded_limit = True
+ email_message.save()
+
+ if log_record is not None:
+ log_record.save()
+ user_notification_action_triggered_signal.send(
+ sender=EmailMessage.send_incident_mail, log_record=log_record
+ )
+
+ @staticmethod
+ def get_error_code_by_sendgrid_status(status):
+ UserNotificationPolicyLogRecord = apps.get_model("base", "UserNotificationPolicyLogRecord")
+
+ SENDGRID_ERRORS_TO_ERROR_CODES_MAP = {
+ SendgridEmailMessageStatuses.BOUNCE: UserNotificationPolicyLogRecord.ERROR_NOTIFICATION_MAIL_DELIVERY_FAILED,
+ SendgridEmailMessageStatuses.BLOCKED: UserNotificationPolicyLogRecord.ERROR_NOTIFICATION_MAIL_DELIVERY_FAILED,
+ SendgridEmailMessageStatuses.DROPPED: UserNotificationPolicyLogRecord.ERROR_NOTIFICATION_MAIL_DELIVERY_FAILED,
+ }
+
+ return SENDGRID_ERRORS_TO_ERROR_CODES_MAP.get(status, None)
diff --git a/engine/apps/sendgridapp/parse.py b/engine/apps/sendgridapp/parse.py
new file mode 100644
index 0000000000..55cc939ccc
--- /dev/null
+++ b/engine/apps/sendgridapp/parse.py
@@ -0,0 +1,119 @@
+import base64
+import email
+import mimetypes
+
+from six import iteritems
+from werkzeug.utils import secure_filename
+
+
+class Parse(object):
+ """Parse data received from the SendGrid Inbound Parse webhook.
+ It's based on https://github.com/sendgrid/sendgrid-python/blob/master/sendgrid/helpers/inbound/parse.py
+ """
+
+ def __init__(self, request):
+ self._keys = [
+ "attachments",
+ "headers",
+ "text",
+ "envelope",
+ "to",
+ "html",
+ "sender_ip",
+ "attachment-info",
+ "subject",
+ "dkim",
+ "SPF",
+ "charsets",
+ "content-ids",
+ "spam_report",
+ "spam_score",
+ "email",
+ ]
+ self._request = request
+ self._payload = request.POST.dict()
+ self._raw_payload = request.POST
+
+ def key_values(self):
+ """
+ Return a dictionary of key/values in the payload received from
+ the webhook
+ """
+ key_values = {}
+ for key in self.keys:
+ if key in self.payload:
+ key_values[key] = self.payload[key]
+ return key_values
+
+ def get_raw_email(self):
+ """
+ This only applies to raw payloads:
+ https://sendgrid.com/docs/Classroom/Basics/Inbound_Parse_Webhook/setting_up_the_inbound_parse_webhook.html#-Raw-Parameters
+ """
+ if "email" in self.payload:
+ raw_email = email.message_from_string(self.payload["email"])
+ return raw_email
+ else:
+ return None
+
+ def attachments(self):
+ """Returns an object with:
+ type = file content type
+ file_name = the name of the file
+ contents = base64 encoded file contents"""
+ attachments = None
+ if "attachment-info" in self.payload:
+ attachments = self._get_attachments(self.request)
+ # Check if we have a raw message
+ raw_email = self.get_raw_email()
+ if raw_email is not None:
+ attachments = self._get_attachments_raw(raw_email)
+ return attachments
+
+ def _get_attachments(self, request):
+ attachments = []
+ for _, filestorage in iteritems(request.files):
+ attachment = {}
+ if filestorage.filename not in (None, "fdopen", ""):
+ filename = secure_filename(filestorage.filename)
+ attachment["type"] = filestorage.content_type
+ attachment["file_name"] = filename
+ attachment["contents"] = base64.b64encode(filestorage.read())
+ attachments.append(attachment)
+ return attachments
+
+ def _get_attachments_raw(self, raw_email):
+ attachments = []
+ counter = 1
+ for part in raw_email.walk():
+ attachment = {}
+ if part.get_content_maintype() == "multipart":
+ continue
+ filename = part.get_filename()
+ if not filename:
+ ext = mimetypes.guess_extension(part.get_content_type())
+ if not ext:
+ ext = ".bin"
+ filename = "part-%03d%s" % (counter, ext)
+ counter += 1
+ attachment["type"] = part.get_content_type()
+ attachment["file_name"] = filename
+ attachment["contents"] = part.get_payload(decode=False)
+ attachments.append(attachment)
+ return attachments
+
+ @property
+ def keys(self):
+ return self._keys
+
+ @property
+ def request(self):
+ return self._request
+
+ @property
+ def payload(self):
+ return self._payload
+
+ @property
+ def raw_payload(self):
+ return self._raw_payload
diff --git a/engine/apps/sendgridapp/permissions.py b/engine/apps/sendgridapp/permissions.py
new file mode 100644
index 0000000000..7c2206e3b2
--- /dev/null
+++ b/engine/apps/sendgridapp/permissions.py
@@ -0,0 +1,14 @@
+from rest_framework.permissions import BasePermission
+
+from apps.base.utils import live_settings
+
+
+class AllowOnlySendgrid(BasePermission):
+ def has_permission(self, request, view):
+ # https://stackoverflow.com/questions/20865673/sendgrid-incoming-mail-webhook-how-do-i-secure-my-endpoint
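+ # Shared-secret check: the webhook URL configured on the Sendgrid side is
+ # expected to include ?key=<SENDGRID_SECRET_KEY>.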
+ sendgrid_key = request.query_params.get("key")
+
+ if sendgrid_key is None:
+ return False
+
+ return live_settings.SENDGRID_SECRET_KEY == sendgrid_key
diff --git a/engine/apps/sendgridapp/templates/email_notification.html b/engine/apps/sendgridapp/templates/email_notification.html
new file mode 100644
index 0000000000..e591944d3a
--- /dev/null
+++ b/engine/apps/sendgridapp/templates/email_notification.html
@@ -0,0 +1,26 @@
+
+
+{% now "H:i.u e"%}
+
+You are invited to check Incident
+
+{{ title }}
+{% if message %}
+ {{ message|linebreaks }}
+{% endif %}
+{# #}
+{# #}
+Amixr team: {{ amixr_team }}
+
+Alert channel: {{ alert_channel }}
+
+Check Incident
+
+Your Amixr.IO
+{% if limit_notification %}
+
+ {{ emails_left }} mail(s) left for this week. Contact your admin.
+{% endif %}
+
+{% now "H:i.u e"%}
+
\ No newline at end of file
diff --git a/engine/apps/sendgridapp/templates/email_verification.html b/engine/apps/sendgridapp/templates/email_verification.html
new file mode 100644
index 0000000000..468daf41d2
--- /dev/null
+++ b/engine/apps/sendgridapp/templates/email_verification.html
@@ -0,0 +1,15 @@
+
+{% now "H:i.u e"%}
+
+Welcome to OnCall!
+
+To verify your email address, please click the button below. If you did not sign up for OnCall, please ignore this email.
+
+Confirm email
+
+Thanks,
+
+OnCall Team
+
+{% now "H:i.u e"%}
+
\ No newline at end of file
diff --git a/engine/apps/sendgridapp/tests/__init__.py b/engine/apps/sendgridapp/tests/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/engine/apps/sendgridapp/tests/factories.py b/engine/apps/sendgridapp/tests/factories.py
new file mode 100644
index 0000000000..e27fd4582d
--- /dev/null
+++ b/engine/apps/sendgridapp/tests/factories.py
@@ -0,0 +1,8 @@
+# import factory
+#
+# from apps.sendgridapp.models import EmailMessage
+#
+#
+# class EmailMessageFactory(factory.DjangoModelFactory):
+# class Meta:
+# model = EmailMessage
diff --git a/engine/apps/sendgridapp/tests/test_emails.py b/engine/apps/sendgridapp/tests/test_emails.py
new file mode 100644
index 0000000000..7d29c42895
--- /dev/null
+++ b/engine/apps/sendgridapp/tests/test_emails.py
@@ -0,0 +1,135 @@
+# from unittest.mock import patch
+#
+# import pytest
+# from django.urls import reverse
+# from django.utils import timezone
+# from rest_framework.test import APIClient
+#
+# from apps.sendgridapp.constants import SendgridEmailMessageStatuses
+# from apps.sendgridapp.verification_token import email_verification_token_generator
+#
+#
+# @pytest.mark.skip(reason="email disabled")
+# @pytest.mark.django_db
+# def test_email_verification(
+# make_team,
+# make_user_for_team,
+# make_email_message,
+# make_alert_receive_channel,
+# make_alert_group,
+# ):
+# amixr_team = make_team()
+# admin = make_user_for_team(amixr_team, role=ROLE_ADMIN)
+# alert_receive_channel = make_alert_receive_channel(amixr_team)
+# alert_group = make_alert_group(alert_receive_channel)
+# make_email_message(
+# receiver=admin, status=SendgridEmailMessageStatuses.ACCEPTED, represents_alert_group=alert_group
+# ),
+# client = APIClient()
+# correct_token = email_verification_token_generator.make_token(admin)
+# url = reverse("sendgridapp:verify_email", kwargs={"token": correct_token, "uid": admin.pk, "slackteam": None})
+# response = client.get(url, content_type="application/json")
+# assert response.status_code == 200
+# admin.refresh_from_db()
+# assert admin.email_verified is True
+#
+#
+# @pytest.mark.skip(reason="email disabled")
+# @pytest.mark.django_db
+# def test_email_verification_incorrect_token(
+# make_team,
+# make_user_for_team,
+# make_email_message,
+# make_alert_receive_channel,
+# make_alert_group,
+# ):
+# amixr_team = make_team()
+# admin = make_user_for_team(amixr_team, role=ROLE_ADMIN)
+# alert_receive_channel = make_alert_receive_channel(amixr_team)
+# alert_group = make_alert_group(alert_receive_channel)
+# make_email_message(
+# receiver=admin, status=SendgridEmailMessageStatuses.ACCEPTED, represents_alert_group=alert_group
+# ),
+#
+# client = APIClient()
+# url = reverse("sendgridapp:verify_email", kwargs={"token": "incorrect_token", "uid": admin.pk, "slackteam": None})
+#
+# response = client.get(path=url, content_type="application/json")
+# assert response.status_code == 403
+# admin.refresh_from_db()
+# assert admin.email_verified is False
+#
+#
+# @pytest.mark.skip(reason="email disabled")
+# @pytest.mark.django_db
+# def test_email_verification_incorrect_uid(
+# make_team,
+# make_user_for_team,
+# make_email_message,
+# make_alert_receive_channel,
+# make_alert_group,
+# ):
+# amixr_team = make_team()
+# admin = make_user_for_team(amixr_team, role=ROLE_ADMIN)
+# alert_receive_channel = make_alert_receive_channel(amixr_team)
+# alert_group = make_alert_group(alert_receive_channel)
+# make_email_message(
+# receiver=admin, status=SendgridEmailMessageStatuses.ACCEPTED, represents_alert_group=alert_group
+# ),
+# client = APIClient()
+#
+# correct_token = email_verification_token_generator.make_token(admin)
+# url = reverse(
+# "sendgridapp:verify_email", kwargs={"token": correct_token, "uid": 100, "slackteam": None} # incorrect user uid
+# )
+# response = client.get(path=url, content_type="application/json")
+# assert response.status_code == 403
+# admin.refresh_from_db()
+# assert admin.email_verified is False
+#
+#
+# @pytest.mark.skip(reason="email disabled")
+# @patch("apps.integrations.helpers.inbound_emails.AllowOnlySendgrid.has_permission", return_value=True)
+# @patch(
+# "apps.slack.helpers.slack_client.SlackClientWithErrorHandling.api_call",
+# return_value={"ok": True, "ts": timezone.now().timestamp()},
+# )
+# @pytest.mark.django_db
+# @pytest.mark.parametrize("status", ["delivered", "bounce", "dropped"])
+# def test_update_email_status(
+# mocked_slack_api_call,
+# mocked_sendgrid_permission,
+# make_team,
+# make_user_for_team,
+# make_email_message,
+# make_alert_receive_channel,
+# make_alert_group,
+# status,
+# ):
+# """The test for Email message status update via api"""
+# amixr_team = make_team()
+# admin = make_user_for_team(amixr_team, role=ROLE_ADMIN)
+# alert_receive_channel = make_alert_receive_channel(amixr_team)
+# alert_group = make_alert_group(alert_receive_channel)
+# email_message = make_email_message(
+# receiver=admin, status=SendgridEmailMessageStatuses.ACCEPTED, represents_alert_group=alert_group
+# )
+# client = APIClient()
+# url = reverse("sendgridapp:email_status_event")
+#
+# data = [
+# {
+# "message_uuid": str(email_message.message_uuid),
+# "event": status,
+# }
+# ]
+# response = client.post(
+# url,
+# data,
+# format="json",
+# )
+#
+# assert response.status_code == 204
+# assert response.data == ""
+# email_message.refresh_from_db()
+# assert email_message.status == SendgridEmailMessageStatuses.DETERMINANT[status]
diff --git a/engine/apps/sendgridapp/urls.py b/engine/apps/sendgridapp/urls.py
new file mode 100644
index 0000000000..1419df3253
--- /dev/null
+++ b/engine/apps/sendgridapp/urls.py
@@ -0,0 +1,9 @@
+from django.urls import path
+
+from apps.sendgridapp.views import EmailStatusCallback
+
+app_name = "sendgridapp"
+
+urlpatterns = [
+ path(r"email_status_event/", EmailStatusCallback.as_view(), name="email_status_event"),
+]
diff --git a/engine/apps/sendgridapp/verification_token.py b/engine/apps/sendgridapp/verification_token.py
new file mode 100644
index 0000000000..3efc97c76c
--- /dev/null
+++ b/engine/apps/sendgridapp/verification_token.py
@@ -0,0 +1,20 @@
+"""Based on example https://simpleisbetterthancomplex.com/tutorial/2016/08/24/how-to-create-one-time-link.html"""
+
+from django.conf import settings
+from django.contrib.auth.tokens import PasswordResetTokenGenerator
+
+
+class EmailVerificationTokenGenerator(PasswordResetTokenGenerator):
+ # Tokens expire per Django's default PASSWORD_RESET_TIMEOUT_DAYS setting (3 days).
+
+ key_salt = "EmailVerificationTokenGenerator" + settings.TOKEN_SALT
+ secret = settings.TOKEN_SECRET
+
+ def _make_hash_value(self, user, timestamp):
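+ # Including email_verified in the hash invalidates the token once the address
+ # is confirmed, making the verification link effectively single-use.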
+ team_datetime_timestamp = (
+ "" if user.teams.first() is None else user.teams.first().datetime.replace(microsecond=0, tzinfo=None)
+ )
+ return str(user.pk) + str(timestamp) + str(team_datetime_timestamp) + str(user.email_verified)
+
+
+email_verification_token_generator = EmailVerificationTokenGenerator()
diff --git a/engine/apps/sendgridapp/views.py b/engine/apps/sendgridapp/views.py
new file mode 100644
index 0000000000..538df2d837
--- /dev/null
+++ b/engine/apps/sendgridapp/views.py
@@ -0,0 +1,29 @@
+import logging
+
+from django.apps import apps
+from rest_framework import status
+from rest_framework.response import Response
+from rest_framework.views import APIView
+
+from apps.sendgridapp.permissions import AllowOnlySendgrid
+
+logger = logging.getLogger(__name__)
+
+
+# Receive Email Status Update from Sendgrid
+class EmailStatusCallback(APIView):
+ # https://sendgrid.com/docs/for-developers/tracking-events/event/#delivery-events
+ permission_classes = [AllowOnlySendgrid]
+
+ def post(self, request):
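+ # Sendgrid posts events as a JSON list; message_uuid is the custom arg
+ # attached to each outgoing email in EmailMessage.send_incident_mail.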
+ for data in request.data:
+ message_uuid = data.get("message_uuid")
+ message_status = data.get("event")
+ # Bounce events carry a more specific "type" field ("bounce" or "blocked").
+ if message_status == "bounce" and data.get("type") is not None:
+ message_status = data["type"]
+ logger.info(f"UUID: {message_uuid}, Status: {message_status}")
+
+ EmailMessage = apps.get_model("sendgridapp", "EmailMessage")
+ EmailMessage.objects.update_status(message_uuid=message_uuid, message_status=message_status)
+
+ return Response(data="", status=status.HTTP_204_NO_CONTENT)
diff --git a/engine/apps/slack/__init__.py b/engine/apps/slack/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/engine/apps/slack/admin.py b/engine/apps/slack/admin.py
new file mode 100644
index 0000000000..88cfb86c65
--- /dev/null
+++ b/engine/apps/slack/admin.py
@@ -0,0 +1,31 @@
+from django.contrib import admin
+
+from common.admin import CustomModelAdmin
+
+from .models import SlackActionRecord, SlackMessage, SlackTeamIdentity, SlackUserIdentity
+
+
+@admin.register(SlackTeamIdentity)
+class SlackTeamIdentityAdmin(CustomModelAdmin):
+ list_display = ("id", "slack_id", "cached_name", "datetime")
+ list_filter = ("datetime",)
+
+
+@admin.register(SlackUserIdentity)
+class SlackUserIdentityAdmin(CustomModelAdmin):
+ list_display = ("id", "slack_id", "slack_team_identity", "cached_name", "cached_slack_email")
+
+ def get_queryset(self, request):
+ return SlackUserIdentity.all_objects
+
+
+@admin.register(SlackMessage)
+class SlackMessageAdmin(CustomModelAdmin):
+ list_display = ("id", "slack_id", "_slack_team_identity", "alert_group", "created_at")
+ list_filter = ("created_at",)
+
+
+@admin.register(SlackActionRecord)
+class SlackActionRecordAdmin(CustomModelAdmin):
+ list_display = ("id", "organization", "user", "step", "datetime")
+ list_filter = ("datetime",)
diff --git a/engine/apps/slack/constants.py b/engine/apps/slack/constants.py
new file mode 100644
index 0000000000..24572538f1
--- /dev/null
+++ b/engine/apps/slack/constants.py
@@ -0,0 +1,11 @@
+from django.utils import timezone
+
+SLACK_BOT_ID = "USLACKBOT"
+SLACK_INVALID_AUTH_RESPONSE = "no_enough_permissions_to_retrieve"
+PLACEHOLDER = "Placeholder"
+
+SLACK_WRONG_TEAM_NAMES = [SLACK_INVALID_AUTH_RESPONSE, PLACEHOLDER]
+
+SLACK_RATE_LIMIT_TIMEOUT = timezone.timedelta(minutes=5)
+SLACK_RATE_LIMIT_DELAY = 10
+CACHE_UPDATE_INCIDENT_SLACK_MESSAGE_LIFETIME = 60 * 10
diff --git a/engine/apps/slack/migrations/0001_squashed_initial.py b/engine/apps/slack/migrations/0001_squashed_initial.py
new file mode 100644
index 0000000000..7bc20cbb68
--- /dev/null
+++ b/engine/apps/slack/migrations/0001_squashed_initial.py
@@ -0,0 +1,132 @@
+# Generated by Django 3.2.5 on 2022-05-31 14:46
+
+import apps.slack.models.slack_channel
+import apps.slack.models.slack_usergroup
+import django.core.validators
+from django.db import migrations, models
+import django.db.models.deletion
+import uuid
+
+
+class Migration(migrations.Migration):
+
+ initial = True
+
+ dependencies = [
+ ('alerts', '0001_squashed_initial'),
+ ]
+
+ operations = [
+ migrations.CreateModel(
+ name='SlackActionRecord',
+ fields=[
+ ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
+ ('step', models.CharField(default=None, max_length=100, null=True)),
+ ('payload', models.TextField(default=None, null=True)),
+ ('datetime', models.DateTimeField(auto_now_add=True)),
+ ],
+ ),
+ migrations.CreateModel(
+ name='SlackChannel',
+ fields=[
+ ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
+ ('public_primary_key', models.CharField(default=apps.slack.models.slack_channel.generate_public_primary_key_for_slack_channel, max_length=20, unique=True, validators=[django.core.validators.MinLengthValidator(13)])),
+ ('slack_id', models.CharField(max_length=100)),
+ ('name', models.CharField(max_length=500)),
+ ('is_archived', models.BooleanField(default=False)),
+ ('is_shared', models.BooleanField(default=None, null=True)),
+ ('last_populated', models.DateField(default=None, null=True)),
+ ],
+ ),
+ migrations.CreateModel(
+ name='SlackTeamIdentity',
+ fields=[
+ ('id', models.AutoField(primary_key=True, serialize=False)),
+ ('slack_id', models.CharField(max_length=100)),
+ ('cached_name', models.CharField(default=None, max_length=100, null=True)),
+ ('cached_app_id', models.CharField(default=None, max_length=100, null=True)),
+ ('access_token', models.CharField(default=None, max_length=100, null=True)),
+ ('bot_user_id', models.CharField(default=None, max_length=100, null=True)),
+ ('bot_access_token', models.CharField(default=None, max_length=100, null=True)),
+ ('oauth_scope', models.TextField(default=None, max_length=30000, null=True)),
+ ('detected_token_revoked', models.DateTimeField(default=None, null=True, verbose_name='Deleted At')),
+ ('is_profile_populated', models.BooleanField(default=False)),
+ ('datetime', models.DateTimeField(auto_now_add=True)),
+ ('installed_via_granular_permissions', models.BooleanField(default=True)),
+ ('last_populated', models.DateTimeField(default=None, null=True)),
+ ('cached_bot_id', models.CharField(default=None, max_length=100, null=True)),
+ ('cached_reinstall_data', models.JSONField(default=None, null=True)),
+ ],
+ options={
+ 'ordering': ('datetime',),
+ },
+ ),
+ migrations.CreateModel(
+ name='SlackUserIdentity',
+ fields=[
+ ('id', models.AutoField(primary_key=True, serialize=False)),
+ ('slack_id', models.CharField(max_length=100)),
+ ('cached_slack_email', models.EmailField(blank=True, default='', max_length=254)),
+ ('cached_im_channel_id', models.CharField(default=None, max_length=100, null=True)),
+ ('cached_phone_number', models.CharField(default=None, max_length=20, null=True)),
+ ('cached_country_code', models.CharField(default=None, max_length=3, null=True)),
+ ('cached_timezone', models.CharField(default=None, max_length=100, null=True)),
+ ('cached_slack_login', models.CharField(default=None, max_length=100, null=True)),
+ ('cached_avatar', models.URLField(default=None, null=True)),
+ ('cached_name', models.CharField(default=None, max_length=200, null=True)),
+ ('phone_from_onboarding', models.BooleanField(default=False)),
+ ('cached_is_bot', models.BooleanField(default=None, null=True)),
+ ('profile_real_name_normalized', models.CharField(default=None, max_length=200, null=True)),
+ ('profile_display_name', models.CharField(default=None, max_length=200, null=True)),
+ ('profile_display_name_normalized', models.CharField(default=None, max_length=200, null=True)),
+ ('profile_real_name', models.CharField(default=None, max_length=200, null=True)),
+ ('deleted', models.BooleanField(default=None, null=True)),
+ ('is_admin', models.BooleanField(default=None, null=True)),
+ ('is_owner', models.BooleanField(default=None, null=True)),
+ ('is_primary_owner', models.BooleanField(default=None, null=True)),
+ ('is_restricted', models.BooleanField(default=None, null=True)),
+ ('is_ultra_restricted', models.BooleanField(default=None, null=True)),
+ ('is_app_user', models.BooleanField(default=None, null=True)),
+ ('has_2fa', models.BooleanField(default=None, null=True)),
+ ('main_menu_last_opened_datetime', models.DateTimeField(default=None, null=True)),
+ ('counter', models.PositiveSmallIntegerField(default=1)),
+ ('is_stranger', models.BooleanField(default=False)),
+ ('is_not_found', models.BooleanField(default=False)),
+ ('slack_team_identity', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='slack_user_identities', to='slack.slackteamidentity')),
+ ],
+ ),
+ migrations.CreateModel(
+ name='SlackUserGroup',
+ fields=[
+ ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
+ ('public_primary_key', models.CharField(default=apps.slack.models.slack_usergroup.generate_public_primary_key_for_slack_user_group, max_length=20, unique=True, validators=[django.core.validators.MinLengthValidator(13)])),
+ ('slack_id', models.CharField(max_length=100)),
+ ('name', models.CharField(max_length=500)),
+ ('handle', models.CharField(max_length=500)),
+ ('members', models.JSONField(blank=True, default=None, null=True)),
+ ('is_active', models.BooleanField(default=False)),
+ ('last_populated', models.DateField(default=None, null=True)),
+ ('slack_team_identity', models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='usergroups', to='slack.slackteamidentity')),
+ ],
+ ),
+ migrations.AddField(
+ model_name='slackteamidentity',
+ name='installed_by',
+ field=models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.PROTECT, to='slack.slackuseridentity'),
+ ),
+ migrations.CreateModel(
+ name='SlackMessage',
+ fields=[
+ ('id', models.CharField(default=uuid.uuid4, editable=False, max_length=36, primary_key=True, serialize=False)),
+ ('slack_id', models.CharField(max_length=100)),
+ ('channel_id', models.CharField(default=None, max_length=100, null=True)),
+ ('ack_reminder_message_ts', models.CharField(default=None, max_length=100, null=True)),
+ ('created_at', models.DateTimeField(auto_now_add=True)),
+ ('cached_permalink', models.URLField(default=None, max_length=250, null=True)),
+ ('last_updated', models.DateTimeField(default=None, null=True)),
+ ('active_update_task_id', models.CharField(default=None, max_length=100, null=True)),
+ ('_slack_team_identity', models.ForeignKey(db_column='slack_team_identity', default=None, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='slack_message', to='slack.slackteamidentity')),
+ ('alert_group', models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='slack_messages', to='alerts.alertgroup')),
+ ],
+ ),
+ ]
diff --git a/engine/apps/slack/migrations/0002_squashed_initial.py b/engine/apps/slack/migrations/0002_squashed_initial.py
new file mode 100644
index 0000000000..3f322e6d08
--- /dev/null
+++ b/engine/apps/slack/migrations/0002_squashed_initial.py
@@ -0,0 +1,53 @@
+# Generated by Django 3.2.5 on 2022-05-31 14:46
+
+from django.db import migrations, models
+import django.db.models.deletion
+
+
+class Migration(migrations.Migration):
+
+ initial = True
+
+ dependencies = [
+ ('user_management', '0001_squashed_initial'),
+ ('slack', '0001_squashed_initial'),
+ ]
+
+ operations = [
+ migrations.AddField(
+ model_name='slackmessage',
+ name='organization',
+ field=models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='slack_message', to='user_management.organization'),
+ ),
+ migrations.AddField(
+ model_name='slackchannel',
+ name='slack_team_identity',
+ field=models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='cached_channels', to='slack.slackteamidentity'),
+ ),
+ migrations.AddField(
+ model_name='slackactionrecord',
+ name='organization',
+ field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='actions', to='user_management.organization'),
+ ),
+ migrations.AddField(
+ model_name='slackactionrecord',
+ name='user',
+ field=models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='actions', to='user_management.user'),
+ ),
+ migrations.AddConstraint(
+ model_name='slackuseridentity',
+ constraint=models.UniqueConstraint(fields=('slack_id', 'slack_team_identity', 'counter'), name='unique_slack_identity_per_team'),
+ ),
+ migrations.AlterUniqueTogether(
+ name='slackusergroup',
+ unique_together={('slack_id', 'slack_team_identity')},
+ ),
+ migrations.AddConstraint(
+ model_name='slackmessage',
+ constraint=models.UniqueConstraint(fields=('slack_id', 'channel_id', '_slack_team_identity'), name='unique slack_id'),
+ ),
+ migrations.AlterUniqueTogether(
+ name='slackchannel',
+ unique_together={('slack_id', 'slack_team_identity')},
+ ),
+ ]
diff --git a/engine/apps/slack/migrations/0003_squashed_create_demo_token_instances.py b/engine/apps/slack/migrations/0003_squashed_create_demo_token_instances.py
new file mode 100644
index 0000000000..ae3368f16f
--- /dev/null
+++ b/engine/apps/slack/migrations/0003_squashed_create_demo_token_instances.py
@@ -0,0 +1,47 @@
+# Generated by Django 3.2.5 on 2021-08-04 10:51
+
+import sys
+from django.db import migrations
+from apps.public_api import constants as public_api_constants
+
+
+def create_demo_token_instances(apps, schema_editor):
+ if not (len(sys.argv) > 1 and sys.argv[1] == 'test'):
+ SlackUserIdentity = apps.get_model('slack', 'SlackUserIdentity')
+ SlackTeamIdentity = apps.get_model('slack', 'SlackTeamIdentity')
+ SlackChannel = apps.get_model('slack', 'SlackChannel')
+ SlackUserGroup = apps.get_model("slack", "SlackUserGroup")
+
+ slack_team_identity, _ = SlackTeamIdentity.objects.get_or_create(
+ slack_id=public_api_constants.DEMO_SLACK_TEAM_ID,
+ )
+ SlackUserIdentity.objects.get_or_create(
+ slack_id=public_api_constants.DEMO_SLACK_USER_ID,
+ slack_team_identity=slack_team_identity,
+ )
+
+ SlackChannel.objects.get_or_create(
+ name=public_api_constants.DEMO_SLACK_CHANNEL_NAME,
+ slack_id=public_api_constants.DEMO_SLACK_CHANNEL_SLACK_ID,
+ slack_team_identity=slack_team_identity,
+ )
+
+ SlackUserGroup.objects.get_or_create(
+ slack_team_identity=slack_team_identity,
+ slack_id=public_api_constants.DEMO_SLACK_USER_GROUP_SLACK_ID,
+ public_primary_key=public_api_constants.DEMO_SLACK_USER_GROUP_ID,
+ name=public_api_constants.DEMO_SLACK_USER_GROUP_NAME,
+ handle=public_api_constants.DEMO_SLACK_USER_GROUP_HANDLE,
+ is_active=True,
+ )
+
+
+class Migration(migrations.Migration):
+
+ dependencies = [
+ ('slack', '0002_squashed_initial'),
+ ]
+
+ operations = [
+ migrations.RunPython(create_demo_token_instances, migrations.RunPython.noop)
+ ]
diff --git a/engine/apps/slack/migrations/__init__.py b/engine/apps/slack/migrations/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/engine/apps/slack/models/__init__.py b/engine/apps/slack/models/__init__.py
new file mode 100644
index 0000000000..4f6ac08b76
--- /dev/null
+++ b/engine/apps/slack/models/__init__.py
@@ -0,0 +1,6 @@
+from .slack_action_record import SlackActionRecord # noqa: F401
+from .slack_channel import SlackChannel # noqa: F401
+from .slack_message import SlackMessage # noqa: F401
+from .slack_team_identity import SlackTeamIdentity # noqa: F401
+from .slack_user_identity import SlackUserIdentity # noqa: F401
+from .slack_usergroup import SlackUserGroup # noqa: F401
diff --git a/engine/apps/slack/models/slack_action_record.py b/engine/apps/slack/models/slack_action_record.py
new file mode 100644
index 0000000000..391abaff4b
--- /dev/null
+++ b/engine/apps/slack/models/slack_action_record.py
@@ -0,0 +1,29 @@
+from django.db import models
+
+from apps.slack.scenarios.scenario_step import ScenarioStep
+
+
+class SlackActionRecord(models.Model):
+ ON_CALL_ROUTINE = [
+ ScenarioStep.get_step("distribute_alerts", "CustomButtonProcessStep").routing_uid(),
+ ScenarioStep.get_step("distribute_alerts", "StopInvitationProcess").routing_uid(),
+ ScenarioStep.get_step("distribute_alerts", "InviteOtherPersonToIncident").routing_uid(),
+ ScenarioStep.get_step("distribute_alerts", "AcknowledgeGroupStep").routing_uid(),
+ ScenarioStep.get_step("distribute_alerts", "UnAcknowledgeGroupStep").routing_uid(),
+ ScenarioStep.get_step("distribute_alerts", "ResolveGroupStep").routing_uid(),
+ ScenarioStep.get_step("distribute_alerts", "SilenceGroupStep").routing_uid(),
+ ]
+
+ organization = models.ForeignKey("user_management.Organization", on_delete=models.CASCADE, related_name="actions")
+
+ user = models.ForeignKey(
+ "user_management.User", on_delete=models.SET_NULL, null=True, default=None, related_name="actions"
+ )
+
+ step = models.CharField(max_length=100, null=True, default=None)
+ payload = models.TextField(null=True, default=None)
+ datetime = models.DateTimeField(auto_now_add=True)
+
+ @staticmethod
+ def filter_only_incident_routine(queryset):
+ return queryset.filter(step__in=SlackActionRecord.ON_CALL_ROUTINE)
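+
+
+# Illustrative sketch (hypothetical queryset): ON_CALL_ROUTINE collects the routing
+# UIDs of the incident-handling Slack steps, so the filter above keeps only records
+# produced by those actions:
+#
+#   routine_actions = SlackActionRecord.filter_only_incident_routine(
+#       SlackActionRecord.objects.filter(organization=organization)
+#   )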
diff --git a/engine/apps/slack/models/slack_channel.py b/engine/apps/slack/models/slack_channel.py
new file mode 100644
index 0000000000..f690a55812
--- /dev/null
+++ b/engine/apps/slack/models/slack_channel.py
@@ -0,0 +1,45 @@
+from django.conf import settings
+from django.core.validators import MinLengthValidator
+from django.db import models
+
+from common.public_primary_keys import generate_public_primary_key, increase_public_primary_key_length
+
+
+def generate_public_primary_key_for_slack_channel():
+ prefix = "H"
+ new_public_primary_key = generate_public_primary_key(prefix)
+
+ failure_counter = 0
+ while SlackChannel.objects.filter(public_primary_key=new_public_primary_key).exists():
+ new_public_primary_key = increase_public_primary_key_length(
+ failure_counter=failure_counter, prefix=prefix, model_name="SlackChannel"
+ )
+ failure_counter += 1
+
+ return new_public_primary_key
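+
+
+# A hedged sketch of the collision handling above: every failed uniqueness check
+# asks increase_public_primary_key_length for a longer candidate, so the key space
+# grows until a free key is found (hypothetical values):
+#
+#   "HABCDEFGHIJKL" taken -> retry with "HABCDEFGHIJKLM" -> free -> returned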
+
+
+class SlackChannel(models.Model):
+ public_primary_key = models.CharField(
+ max_length=20,
+ validators=[MinLengthValidator(settings.PUBLIC_PRIMARY_KEY_MIN_LENGTH + 1)],
+ unique=True,
+ default=generate_public_primary_key_for_slack_channel,
+ )
+ slack_id = models.CharField(max_length=100)
+
+ slack_team_identity = models.ForeignKey(
+ "slack.SlackTeamIdentity",
+ on_delete=models.PROTECT,
+ related_name="cached_channels",
+ null=True,
+ default=None,
+ )
+ name = models.CharField(max_length=500)
+
+ is_archived = models.BooleanField(default=False)
+ is_shared = models.BooleanField(null=True, default=None)
+ last_populated = models.DateField(null=True, default=None)
+
+ class Meta:
+ unique_together = ("slack_id", "slack_team_identity")
diff --git a/engine/apps/slack/models/slack_message.py b/engine/apps/slack/models/slack_message.py
new file mode 100644
index 0000000000..4fe6f4706e
--- /dev/null
+++ b/engine/apps/slack/models/slack_message.py
@@ -0,0 +1,237 @@
+import logging
+import time
+import uuid
+
+from django.apps import apps
+from django.db import models
+
+from apps.slack.slack_client import SlackClientWithErrorHandling
+from apps.slack.slack_client.exceptions import (
+ SlackAPIChannelArchivedException,
+ SlackAPIException,
+ SlackAPITokenException,
+)
+
+logger = logging.getLogger(__name__)
+logger.setLevel(logging.DEBUG)
+
+
+class SlackMessage(models.Model):
+ id = models.CharField(primary_key=True, default=uuid.uuid4, editable=False, max_length=36)
+
+ slack_id = models.CharField(max_length=100)
+ channel_id = models.CharField(max_length=100, null=True, default=None)
+
+ organization = models.ForeignKey(
+ "user_management.Organization", on_delete=models.CASCADE, null=True, default=None, related_name="slack_message"
+ )
+ _slack_team_identity = models.ForeignKey(
+ "slack.SlackTeamIdentity",
+ on_delete=models.PROTECT,
+ null=True,
+ default=None,
+ related_name="slack_message",
+ db_column="slack_team_identity",
+ )
+
+ ack_reminder_message_ts = models.CharField(max_length=100, null=True, default=None)
+
+ created_at = models.DateTimeField(auto_now_add=True)
+
+ cached_permalink = models.URLField(max_length=250, null=True, default=None)
+
+ last_updated = models.DateTimeField(null=True, default=None)
+
+ alert_group = models.ForeignKey(
+ "alerts.AlertGroup",
+ on_delete=models.CASCADE,
+ null=True,
+ default=None,
+ related_name="slack_messages",
+ )
+
+    # ID of the latest Celery task that updates the message
+ active_update_task_id = models.CharField(max_length=100, null=True, default=None)
+
+ class Meta:
+ # slack_id is unique within the context of a channel or conversation
+ constraints = [
+ models.UniqueConstraint(fields=["slack_id", "channel_id", "_slack_team_identity"], name="unique slack_id")
+ ]
+
+ @property
+ def slack_team_identity(self):
+ if self._slack_team_identity is None:
+            if self.organization is None:  # unexpected: nothing left to resolve the team identity from
+                logger.warning(
+                    f"SlackMessage (pk: {self.pk}): both _slack_team_identity and organization "
+                    f"are None, cannot resolve the Slack team identity."
+                )
+ return None
+            # Re-fetch the object to move the connection from the read-only replica to the master DB.
+ self._slack_team_identity = self.organization.slack_team_identity
+
+ _self = SlackMessage.objects.get(pk=self.pk)
+ _self._slack_team_identity = _self.organization.slack_team_identity
+ _self.save()
+ return self._slack_team_identity
+
+ def get_alert_group(self):
+ try:
+ return self._alert_group
+ except SlackMessage._alert_group.RelatedObjectDoesNotExist:
+ if self.alert_group:
+ self.alert_group.slack_message = self
+ self.alert_group.save(update_fields=["slack_message"])
+ return self.alert_group
+            return self.alert_group  # None here; the model has no other alert relation to fall back to
+
+ @property
+ def permalink(self):
+ if self.slack_team_identity is not None and self.cached_permalink is None:
+ sc = SlackClientWithErrorHandling(self.slack_team_identity.bot_access_token)
+ result = None
+ try:
+ result = sc.api_call(
+ "chat.getPermalink",
+ channel=self.channel_id,
+ message_ts=self.slack_id,
+ )
+ except SlackAPIException as e:
+ if e.response["error"] == "message_not_found":
+ return "https://slack.com/resources/using-slack/page/404"
+ elif e.response["error"] == "channel_not_found":
+ return "https://slack.com/resources/using-slack/page/404"
+
+ if result is not None and result["permalink"] is not None:
+ # Reconnect to DB in case we use read-only DB here.
+ _self = SlackMessage.objects.get(pk=self.pk)
+ _self.cached_permalink = result["permalink"]
+ _self.save()
+ self.cached_permalink = _self.cached_permalink
+
+ if self.cached_permalink is not None:
+ return self.cached_permalink
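+
+    # Usage sketch (illustrative, names hypothetical): the permalink is resolved
+    # lazily and cached, so only the first access pays for the chat.getPermalink call:
+    #
+    #   message = SlackMessage.objects.get(pk=some_pk)
+    #   url = message.permalink  # Slack API once, then served from cached_permalink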
+
+ def send_slack_notification(self, user, alert_group, notification_policy):
+ UserNotificationPolicyLogRecord = apps.get_model("base", "UserNotificationPolicyLogRecord")
+ slack_message = alert_group.get_slack_message()
+ user_verbal = user.get_user_verbal_for_team_for_slack(mention=True)
+
+ slack_user_identity = user.slack_user_identity
+ if slack_user_identity is None:
+ text = "{}\nTried to invite {} to look at incident. Unfortunately {} is not in slack.".format(
+ alert_group.long_verbose_name, user_verbal, user_verbal
+ )
+
+ UserNotificationPolicyLogRecord(
+ author=user,
+ type=UserNotificationPolicyLogRecord.TYPE_PERSONAL_NOTIFICATION_FAILED,
+ notification_policy=notification_policy,
+ alert_group=alert_group,
+ reason="User is not in Slack",
+ notification_step=notification_policy.step,
+ notification_channel=notification_policy.notify_by,
+ notification_error_code=UserNotificationPolicyLogRecord.ERROR_NOTIFICATION_IN_SLACK_USER_NOT_IN_SLACK,
+ ).save()
+ else:
+ text = "{}\nInviting {} to look at incident.".format(alert_group.long_verbose_name, user_verbal)
+
+ attachments = [
+ {"color": "#c6c000", "callback_id": "alert", "text": text}, # yellow
+ ]
+ sc = SlackClientWithErrorHandling(self.slack_team_identity.bot_access_token)
+ channel_id = slack_message.channel_id
+
+ try:
+ result = sc.api_call(
+ "chat.postMessage",
+ channel=channel_id,
+ attachments=attachments,
+ thread_ts=slack_message.slack_id,
+ unfurl_links=True,
+ )
+ except SlackAPITokenException as e:
+            logger.warning(e)
+ UserNotificationPolicyLogRecord(
+ author=user,
+ type=UserNotificationPolicyLogRecord.TYPE_PERSONAL_NOTIFICATION_FAILED,
+ notification_policy=notification_policy,
+ alert_group=alert_group,
+ reason="Slack token error",
+ notification_step=notification_policy.step,
+ notification_channel=notification_policy.notify_by,
+ notification_error_code=UserNotificationPolicyLogRecord.ERROR_NOTIFICATION_IN_SLACK_TOKEN_ERROR,
+ ).save()
+ return
+ except SlackAPIChannelArchivedException as e:
+            logger.warning(e)
+ UserNotificationPolicyLogRecord(
+ author=user,
+ type=UserNotificationPolicyLogRecord.TYPE_PERSONAL_NOTIFICATION_FAILED,
+ notification_policy=notification_policy,
+ alert_group=alert_group,
+ reason="channel is archived",
+ notification_step=notification_policy.step,
+ notification_channel=notification_policy.notify_by,
+ notification_error_code=UserNotificationPolicyLogRecord.ERROR_NOTIFICATION_IN_SLACK_CHANNEL_IS_ARCHIVED,
+ ).save()
+ return
+ else:
+ SlackMessage(
+ slack_id=result["ts"],
+ organization=self.organization,
+ _slack_team_identity=self.slack_team_identity,
+ channel_id=channel_id,
+ alert_group=alert_group,
+ ).save()
+
+        # Check whether the escalated user is in the channel; otherwise post a message asking to invite them.
+ try:
+ if slack_user_identity:
+ channel_members = []
+ try:
+ channel_members = sc.api_call("conversations.members", channel=channel_id)["members"]
+ except SlackAPIException as e:
+ if e.response["error"] == "fetch_members_failed":
+ logger.warning(
+ f"Unable to get members from slack conversation: 'fetch_members_failed'. "
+ f"Slack team identity pk: {self.slack_team_identity.pk}.\n"
+ f"{e}"
+ )
+ else:
+ raise e
+
+ if slack_user_identity.slack_id not in channel_members:
+                    time.sleep(5)  # Slack rate-limits two messages sent at the same moment. Dirty hack.
+ result = sc.api_call(
+ "chat.postMessage",
+ channel=channel_id,
+ text=f":warning: Tried to ask {user_verbal} to look at incident. "
+ f"Unfortunately {user_verbal} is not in this channel. Please, invite.",
+ )
+ SlackMessage(
+ slack_id=result["ts"],
+ organization=self.organization,
+ _slack_team_identity=self.slack_team_identity,
+ channel_id=channel_id,
+ alert_group=alert_group,
+ ).save()
+ UserNotificationPolicyLogRecord(
+ author=user,
+ type=UserNotificationPolicyLogRecord.TYPE_PERSONAL_NOTIFICATION_FAILED,
+ notification_policy=notification_policy,
+ alert_group=alert_group,
+ reason="User is not in Slack channel",
+ notification_step=notification_policy.step,
+ notification_channel=notification_policy.notify_by,
+ notification_error_code=UserNotificationPolicyLogRecord.ERROR_NOTIFICATION_IN_SLACK_USER_NOT_IN_CHANNEL,
+ ).save()
+ except SlackAPITokenException as e:
+            logger.warning(e)
+ except SlackAPIException as e:
+ if e.response["error"] == "method_not_supported_for_channel_type":
+                # It's OK, this is just a private channel; nothing to do.
+ pass
+ else:
+ raise e
diff --git a/engine/apps/slack/models/slack_team_identity.py b/engine/apps/slack/models/slack_team_identity.py
new file mode 100644
index 0000000000..b8d76ce999
--- /dev/null
+++ b/engine/apps/slack/models/slack_team_identity.py
@@ -0,0 +1,160 @@
+import logging
+
+from django.apps import apps
+from django.db import models
+from django.db.models import JSONField
+
+from apps.slack.constants import SLACK_INVALID_AUTH_RESPONSE, SLACK_WRONG_TEAM_NAMES
+from apps.slack.slack_client import SlackClientWithErrorHandling
+from apps.slack.slack_client.exceptions import SlackAPIException, SlackAPITokenException
+from apps.user_management.organization_log_creator import OrganizationLogType, create_organization_log
+from common.constants.role import Role
+
+logger = logging.getLogger(__name__)
+
+
+class SlackTeamIdentity(models.Model):
+
+ id = models.AutoField(primary_key=True)
+ slack_id = models.CharField(max_length=100)
+ cached_name = models.CharField(max_length=100, null=True, default=None)
+ cached_app_id = models.CharField(max_length=100, null=True, default=None)
+ access_token = models.CharField(max_length=100, null=True, default=None)
+ bot_user_id = models.CharField(max_length=100, null=True, default=None)
+ bot_access_token = models.CharField(max_length=100, null=True, default=None)
+ oauth_scope = models.TextField(max_length=30000, null=True, default=None)
+ detected_token_revoked = models.DateTimeField(null=True, default=None, verbose_name="Deleted At")
+ is_profile_populated = models.BooleanField(default=False)
+ datetime = models.DateTimeField(auto_now_add=True)
+ installed_via_granular_permissions = models.BooleanField(default=True)
+
+ installed_by = models.ForeignKey("SlackUserIdentity", on_delete=models.PROTECT, null=True, default=None)
+
+ last_populated = models.DateTimeField(null=True, default=None)
+
+ cached_bot_id = models.CharField(max_length=100, null=True, default=None)
+
+    # Response from oauth.access. This field is used to reinstall the app to another OnCall workspace.
+ cached_reinstall_data = JSONField(null=True, default=None)
+
+ class Meta:
+ ordering = ("datetime",)
+
+ def __str__(self):
+ return f"{self.pk}: {self.name}"
+
+ def update_oauth_fields(self, user, organization, reinstall_data):
+ logger.info(f"updated oauth_fields for sti {self.pk}")
+ SlackUserIdentity = apps.get_model("slack", "SlackUserIdentity")
+ organization.slack_team_identity = self
+ organization.save(update_fields=["slack_team_identity"])
+ slack_user_identity, _ = SlackUserIdentity.objects.get_or_create(
+ slack_id=reinstall_data["authed_user"]["id"],
+ slack_team_identity=self,
+ )
+ user.slack_user_identity = slack_user_identity
+ user.save(update_fields=["slack_user_identity"])
+ self.bot_access_token = reinstall_data["access_token"]
+ self.bot_user_id = reinstall_data["bot_user_id"]
+ self.oauth_scope = reinstall_data["scope"]
+ self.cached_name = reinstall_data["team"]["name"]
+ self.access_token = reinstall_data["authed_user"]["access_token"]
+ self.installed_by = slack_user_identity
+ self.cached_reinstall_data = None
+ self.installed_via_granular_permissions = True
+ self.save()
+ description = f"Slack workspace {self.cached_name} was connected to organization"
+ create_organization_log(organization, user, OrganizationLogType.TYPE_SLACK_WORKSPACE_CONNECTED, description)
+
+ def get_cached_channels(self, search_term=None, slack_id=None):
+ queryset = self.cached_channels
+ if search_term is not None:
+ queryset = queryset.filter(name__startswith=search_term)
+ if slack_id is not None:
+ queryset = queryset.filter(slack_id=slack_id)
+ return queryset.all()
+
+ @property
+ def bot_id(self):
+ if self.cached_bot_id is None:
+ sc = SlackClientWithErrorHandling(self.bot_access_token)
+ auth = sc.api_call("auth.test")
+ self.cached_bot_id = auth.get("bot_id")
+ self.save(update_fields=["cached_bot_id"])
+ return self.cached_bot_id
+
+ @property
+ def members(self):
+ sc = SlackClientWithErrorHandling(self.bot_access_token)
+
+ next_cursor = None
+ members = []
+ while next_cursor != "" or next_cursor is None:
+ result = sc.api_call("users.list", cursor=next_cursor, team=self)
+ next_cursor = result["response_metadata"]["next_cursor"]
+ members += result["members"]
+
+ return members
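+
+    # The loop above is plain Slack cursor pagination: users.list returns
+    # response_metadata.next_cursor, which is an empty string on the last page.
+    # A hedged equivalent using the paginated helper seen elsewhere in this app
+    # (assuming it supports users.list the same way) would be:
+    #
+    #   members = sc.paginated_api_call("users.list", paginated_key="members")["members"]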
+
+ @property
+ def name(self):
+ if self.cached_name is None or self.cached_name in SLACK_WRONG_TEAM_NAMES:
+ try:
+ sc = SlackClientWithErrorHandling(self.bot_access_token)
+ result = sc.api_call("team.info")
+ self.cached_name = result["team"]["name"]
+ self.save()
+ except SlackAPIException as e:
+ if e.response["error"] == "invalid_auth":
+ self.cached_name = SLACK_INVALID_AUTH_RESPONSE
+ self.save()
+ else:
+ raise e
+ return self.cached_name
+
+ @property
+ def app_id(self):
+ if not self.cached_app_id:
+ sc = SlackClientWithErrorHandling(self.bot_access_token)
+ result = sc.api_call("bots.info", bot=self.bot_id)
+ app_id = result["bot"]["app_id"]
+ self.cached_app_id = app_id
+ self.save(update_fields=["cached_app_id"])
+ return self.cached_app_id
+
+ def get_users_from_slack_conversation_for_organization(self, channel_id, organization):
+ sc = SlackClientWithErrorHandling(self.bot_access_token)
+ members = self.get_conversation_members(sc, channel_id)
+
+ users = organization.users.filter(slack_user_identity__slack_id__in=members, role__in=[Role.ADMIN, Role.EDITOR])
+ return users
+
+ def get_conversation_members(self, slack_client, channel_id):
+ try:
+ members = slack_client.paginated_api_call(
+ "conversations.members", channel=channel_id, paginated_key="members"
+ )["members"]
+ except SlackAPITokenException as e:
+ logger.warning(
+ f"Unable to get members from slack conversation for Slack team identity pk: {self.pk}.\n" f"{e}"
+ )
+ members = []
+ except SlackAPIException as e:
+ if e.response["error"] == "fetch_members_failed":
+ logger.warning(
+ f"Unable to get members from slack conversation: 'fetch_members_failed'. "
+ f"Slack team identity pk: {self.pk}.\n"
+ f"{e}"
+ )
+ members = []
+ elif e.response["error"] == "channel_not_found":
+ logger.warning(
+ f"Unable to get members from slack conversation: 'channel_not_found'. "
+ f"Slack team identity pk: {self.pk}.\n"
+ f"{e}"
+ )
+ members = []
+ else:
+ raise e
+
+ return members
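+
+
+# Illustrative usage (hypothetical identifiers): resolving which OnCall users
+# can be notified for a Slack channel combines both helpers above:
+#
+#   users = team_identity.get_users_from_slack_conversation_for_organization(
+#       channel_id="C0123456789", organization=organization
+#   )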
diff --git a/engine/apps/slack/models/slack_user_identity.py b/engine/apps/slack/models/slack_user_identity.py
new file mode 100644
index 0000000000..4c0047fa34
--- /dev/null
+++ b/engine/apps/slack/models/slack_user_identity.py
@@ -0,0 +1,227 @@
+import logging
+
+import requests
+from django.db import models
+
+from apps.slack.constants import SLACK_BOT_ID
+from apps.slack.slack_client import SlackClientWithErrorHandling
+from apps.slack.slack_client.exceptions import SlackAPIException, SlackAPITokenException
+from apps.user_management.models import User
+
+logger = logging.getLogger(__name__)
+
+
+class AllSlackUserIdentityManager(models.Manager):
+ use_in_migrations = False
+
+ def get_queryset(self):
+ return super().get_queryset()
+
+
+class SlackUserIdentityManager(models.Manager):
+ use_in_migrations = False
+
+ def get_queryset(self):
+ return super().get_queryset().filter(counter=1)
+
+ def get(self, **kwargs):
+ try:
+ instance = super().get(**kwargs, is_restricted=False, is_ultra_restricted=False)
+ except SlackUserIdentity.DoesNotExist:
+ instance = self.filter(**kwargs).first()
+ if instance is None:
+ raise SlackUserIdentity.DoesNotExist
+ return instance
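+
+    # Lookup semantics sketch: .get() first tries a full (non-guest) identity and
+    # falls back to any matching row instead of raising right away. With two rows
+    # for the same slack_id where one has is_restricted=True, .get(slack_id=...)
+    # returns the unrestricted one.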
+
+
+class SlackUserIdentity(models.Model):
+
+ objects = SlackUserIdentityManager()
+ all_objects = AllSlackUserIdentityManager()
+
+ id = models.AutoField(primary_key=True)
+
+ slack_id = models.CharField(max_length=100)
+
+ slack_team_identity = models.ForeignKey(
+ "SlackTeamIdentity", on_delete=models.PROTECT, related_name="slack_user_identities"
+ )
+
+ cached_slack_email = models.EmailField(blank=True, default="")
+
+ cached_im_channel_id = models.CharField(max_length=100, null=True, default=None)
+ cached_phone_number = models.CharField(max_length=20, null=True, default=None)
+ cached_country_code = models.CharField(max_length=3, null=True, default=None)
+ cached_timezone = models.CharField(max_length=100, null=True, default=None)
+ cached_slack_login = models.CharField(max_length=100, null=True, default=None)
+ cached_avatar = models.URLField(max_length=200, null=True, default=None)
+ cached_name = models.CharField(max_length=200, null=True, default=None)
+
+ phone_from_onboarding = models.BooleanField(default=False)
+
+ cached_is_bot = models.BooleanField(null=True, default=None)
+
+ # Fields from user profile
+ profile_real_name_normalized = models.CharField(max_length=200, null=True, default=None)
+ profile_display_name = models.CharField(max_length=200, null=True, default=None)
+ profile_display_name_normalized = models.CharField(max_length=200, null=True, default=None)
+ profile_real_name = models.CharField(max_length=200, null=True, default=None)
+
+ deleted = models.BooleanField(null=True, default=None)
+ is_admin = models.BooleanField(null=True, default=None)
+ is_owner = models.BooleanField(null=True, default=None)
+ is_primary_owner = models.BooleanField(null=True, default=None)
+ is_restricted = models.BooleanField(null=True, default=None)
+ is_ultra_restricted = models.BooleanField(null=True, default=None)
+ is_app_user = models.BooleanField(null=True, default=None)
+ has_2fa = models.BooleanField(null=True, default=None)
+
+ main_menu_last_opened_datetime = models.DateTimeField(null=True, default=None)
+ counter = models.PositiveSmallIntegerField(default=1)
+
+ is_stranger = models.BooleanField(default=False)
+ is_not_found = models.BooleanField(default=False)
+
+ class Meta:
+ constraints = [
+ models.UniqueConstraint(
+ fields=["slack_id", "slack_team_identity", "counter"], name="unique_slack_identity_per_team"
+ )
+ ]
+
+ def __str__(self):
+ return self.slack_login
+
+ @property
+ def slack_verbal(self):
+ return (
+ self.profile_real_name_normalized
+ or self.profile_real_name
+ or self.profile_display_name_normalized
+ or self.profile_display_name
+ or self.cached_name
+ or self.cached_slack_login
+ )
+
+ @property
+ def slack_login(self):
+ if self.cached_slack_login is None or self.cached_slack_login == "slack_token_revoked_unable_to_cache_login":
+            _self = SlackUserIdentity.objects.get(pk=self.pk)  # Re-fetch in case we are in the read-only DB context.
+ sc = SlackClientWithErrorHandling(self.slack_team_identity.bot_access_token)
+ try:
+ result = sc.api_call(
+ "users.info",
+ user=self.slack_id,
+ team=self.slack_team_identity,
+ )
+ self.cached_slack_login = result["user"]["name"]
+ _self.cached_slack_login = result["user"]["name"]
+ _self.save()
+ except SlackAPITokenException as e:
+ logger.warning("Unable to get slack login: token revoked\n" + str(e))
+ self.cached_slack_login = "slack_token_revoked_unable_to_cache_login"
+ _self.cached_slack_login = "slack_token_revoked_unable_to_cache_login"
+ _self.save()
+ return "slack_token_revoked_unable_to_cache_login"
+ except SlackAPIException as e:
+ if e.response["error"] == "user_not_found":
+ logger.warning("user_not_found " + str(e))
+ self.cached_slack_login = "user_not_found"
+ _self.cached_slack_login = "user_not_found"
+ _self.save()
+ elif e.response["error"] == "invalid_auth":
+ return "no_enough_permissions_to_retrieve"
+ else:
+ raise e
+
+ return str(self.cached_slack_login)
+
+ @property
+ def timezone(self):
+ if self.cached_timezone is None or self.cached_timezone == "None":
+            _self = SlackUserIdentity.objects.get(pk=self.pk)  # Re-fetch in case we are in the read-only DB context.
+ sc = SlackClientWithErrorHandling(self.slack_team_identity.bot_access_token)
+ try:
+ result = sc.api_call(
+ "users.info",
+ user=self.slack_id,
+ timeout=5,
+ )
+ tz_from_slack = result["user"].get("tz", "UTC")
+ if tz_from_slack == "None" or tz_from_slack is None:
+ tz_from_slack = "UTC"
+ self.cached_timezone = tz_from_slack
+ _self.cached_timezone = tz_from_slack
+ _self.save(update_fields=["cached_timezone"])
+ except SlackAPITokenException as e:
+ print("Token revoked: " + str(e))
+ except requests.exceptions.Timeout:
+                # Do not cache the timezone on timeout, so that it can be retried later
+ return "UTC"
+
+ return str(self.cached_timezone)
+
+ @property
+ def im_channel_id(self):
+ if self.cached_im_channel_id is None:
+            _self = SlackUserIdentity.objects.get(pk=self.pk)  # Re-fetch in case we are in the read-only DB context.
+ sc = SlackClientWithErrorHandling(self.slack_team_identity.bot_access_token)
+ try:
+ result = sc.api_call("conversations.open", users=self.slack_id, return_im=True)
+ self.cached_im_channel_id = result["channel"]["id"]
+ _self.cached_im_channel_id = result["channel"]["id"]
+ _self.save()
+ except SlackAPIException as e:
+ if e.response["error"] == "cannot_dm_bot":
+ logger.warning("Trying to DM bot " + str(e))
+ else:
+ raise e
+
+ return self.cached_im_channel_id
+
+ def update_profile_info(self):
+ sc = SlackClientWithErrorHandling(self.slack_team_identity.bot_access_token)
+ logger.info("Update user profile info")
+ try:
+ result = sc.api_call(
+ "users.info",
+ user=self.slack_id,
+ team=self.slack_team_identity,
+ )
+ except SlackAPITokenException as e:
+ logger.warning(f"Unable to get user info due token revoked or account inactive: {e}")
+ result = None
+ else:
+ if not self.cached_slack_email and "email" in result["user"]["profile"]:
+ self.cached_slack_email = result["user"]["profile"]["email"]
+ if "real_name" in result["user"]["profile"]:
+ self.profile_real_name = result["user"]["profile"]["real_name"]
+ if "real_name_normalized" in result["user"]["profile"]:
+ self.profile_real_name_normalized = result["user"]["profile"]["real_name_normalized"]
+ if "display_name" in result["user"]["profile"]:
+ self.profile_display_name = result["user"]["profile"]["display_name"]
+ if "display_name_normalized" in result["user"]["profile"]:
+ self.profile_display_name_normalized = result["user"]["profile"]["display_name_normalized"]
+ self.cached_avatar = result["user"]["profile"].get("image_512")
+ if result["user"].get("is_bot") is True or result["user"].get("id") == SLACK_BOT_ID:
+ self.cached_is_bot = True
+ self.cached_name = result["user"].get("real_name", result["user"]["name"])
+ self.cached_slack_login = result["user"].get("name")
+ self.save()
+ return result
+
+ def get_slack_username(self):
+ if not self.slack_verbal:
+ logger.info("Trying to get username from slack")
+ result = self.update_profile_info()
+ if result is None:
+ logger.info("Unable to populate username")
+ return None
+ return self.slack_verbal or self.cached_slack_email.split("@")[0] or None
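+
+    # Fallback chain sketch: slack_verbal prefers the profile name fields; if all
+    # are empty the profile is refreshed via users.info, and as a last resort the
+    # local part of the cached email is used, e.g. "alice@example.com" -> "alice"
+    # (example address, not taken from real data).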
+
+ def get_user(self, organization):
+ try:
+ user = organization.users.get(slack_user_identity=self)
+ except User.DoesNotExist:
+ user = None
+ return user
diff --git a/engine/apps/slack/models/slack_usergroup.py b/engine/apps/slack/models/slack_usergroup.py
new file mode 100644
index 0000000000..2b5f8fb6f7
--- /dev/null
+++ b/engine/apps/slack/models/slack_usergroup.py
@@ -0,0 +1,177 @@
+import logging
+
+import requests
+from django.conf import settings
+from django.core.validators import MinLengthValidator
+from django.db import models
+from django.db.models import JSONField
+from django.utils import timezone
+
+from apps.slack.slack_client import SlackClientWithErrorHandling
+from apps.slack.slack_client.exceptions import SlackAPIException
+from common.constants.role import Role
+from common.public_primary_keys import generate_public_primary_key, increase_public_primary_key_length
+
+logger = logging.getLogger(__name__)
+
+
+def generate_public_primary_key_for_slack_user_group():
+ prefix = "G"
+ new_public_primary_key = generate_public_primary_key(prefix)
+
+ failure_counter = 0
+ while SlackUserGroup.objects.filter(public_primary_key=new_public_primary_key).exists():
+ new_public_primary_key = increase_public_primary_key_length(
+ failure_counter=failure_counter, prefix=prefix, model_name="SlackUserGroup"
+ )
+ failure_counter += 1
+
+ return new_public_primary_key
+
+
+class SlackUserGroup(models.Model):
+
+ public_primary_key = models.CharField(
+ max_length=20,
+ validators=[MinLengthValidator(settings.PUBLIC_PRIMARY_KEY_MIN_LENGTH + 1)],
+ unique=True,
+ default=generate_public_primary_key_for_slack_user_group,
+ )
+
+ slack_id = models.CharField(max_length=100)
+
+ slack_team_identity = models.ForeignKey(
+ "slack.SlackTeamIdentity",
+ on_delete=models.PROTECT,
+ related_name="usergroups",
+ null=True,
+ default=None,
+ )
+ name = models.CharField(max_length=500)
+ handle = models.CharField(max_length=500)
+ members = JSONField(default=None, null=True, blank=True)
+ is_active = models.BooleanField(default=False)
+ last_populated = models.DateField(null=True, default=None)
+
+ class Meta:
+ unique_together = ("slack_id", "slack_team_identity")
+
+ @property
+ def can_be_updated(self) -> bool:
+ sc = SlackClientWithErrorHandling(self.slack_team_identity.bot_access_token)
+
+ try:
+ sc.api_call("usergroups.update", usergroup=self.slack_id, timeout=5)
+ return True
+ except (SlackAPIException, requests.exceptions.Timeout):
+ return False
+
+ @property
+ def oncall_slack_user_identities(self):
+ users = self.oncall_schedules.get_oncall_users()
+ slack_user_identities = [user.slack_user_identity for user in users if user.slack_user_identity is not None]
+ return slack_user_identities
+
+ def update_oncall_members(self):
+ slack_ids = [slack_user_identity.slack_id for slack_user_identity in self.oncall_slack_user_identities]
+
+ # Slack doesn't allow user groups to be empty
+ if len(slack_ids) == 0:
+ return
+
+        # Skip the Slack API request if the user group is already populated correctly
+ if self.members is not None and set(self.members) == set(slack_ids):
+ return
+
+ try:
+ self.update_members(slack_ids)
+        except SlackAPIException as e:
+            if e.response["error"] == "permission_denied":
+                logger.warning(
+                    "Could not update the usergroup with Slack ID: {} due to permission_denied".format(self.slack_id)
+                )
+            else:
+                raise e
+
+ def update_members(self, slack_ids):
+ sc = SlackClientWithErrorHandling(self.slack_team_identity.bot_access_token)
+
+ sc.api_call(
+ "usergroups.users.update",
+ usergroup=self.slack_id,
+ users=slack_ids,
+ )
+
+ self.members = slack_ids
+ self.save(update_fields=("members",))
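+
+    # Sync sketch (illustrative values): update_oncall_members() diffs the cached
+    # members against the current on-call Slack IDs and only calls
+    # usergroups.users.update when they differ:
+    #
+    #   members == ["U1", "U2"], on-call == {"U2", "U1"}  -> no API call
+    #   members == ["U1"],       on-call == {"U1", "U3"}  -> update_members(["U1", "U3"])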
+
+ def get_users_from_members_for_organization(self, organization):
+ return organization.users.filter(
+ slack_user_identity__slack_id__in=self.members, role__in=[Role.ADMIN, Role.EDITOR]
+ )
+
+ @classmethod
+ def update_or_create_slack_usergroup_from_slack(cls, slack_id, slack_team_identity):
+ sc = SlackClientWithErrorHandling(slack_team_identity.bot_access_token)
+ bot_access_token_accepted = True
+ try:
+ usergroups_list = sc.api_call(
+ "usergroups.list",
+ )
+ except SlackAPIException as e:
+ if e.response["error"] == "not_allowed_token_type":
+                # Retry the same request with the access token. This is required due to the migration
+                # to granular permissions and can be removed once clients reinstall their bots.
+ try:
+ sc_with_access_token = SlackClientWithErrorHandling(slack_team_identity.access_token)
+ usergroups_list = sc_with_access_token.api_call(
+ "usergroups.list",
+ )
+ bot_access_token_accepted = False
+ except SlackAPIException as err:
+ if err.response["error"] == "missing_scope":
+ return None, False
+ else:
+ raise err
+ elif e.response["error"] == "missing_scope":
+ return None, False
+ else:
+ raise e
+
+ for usergroup in usergroups_list["usergroups"]:
+ if usergroup["id"] == slack_id:
+ try:
+ if bot_access_token_accepted:
+ usergroups_users = sc.api_call(
+ "usergroups.users.list",
+ usergroup=usergroup["id"],
+ )
+ else:
+ sc_with_access_token = SlackClientWithErrorHandling(slack_team_identity.access_token)
+ usergroups_users = sc_with_access_token.api_call(
+ "usergroups.users.list",
+ usergroup=usergroup["id"],
+ )
+ except SlackAPIException as e:
+ if e.response["error"] == "no_such_subteam":
+ logger.info("User group does not exist")
+ else:
+ logger.error(
+ f"'usergroups.users.list' slack api error. "
+ f"SlackTeamIdentity pk: {slack_team_identity.pk}\n{e}"
+ )
+ else:
+ usergroup_name = usergroup["name"]
+ usergroup_handle = usergroup["handle"]
+ usergroup_members = usergroups_users["users"]
+ usergroup_is_active = usergroup["date_delete"] == 0
+
+ return SlackUserGroup.objects.update_or_create(
+ slack_id=usergroup["id"],
+ slack_team_identity=slack_team_identity,
+ defaults={
+ "name": usergroup_name,
+ "handle": usergroup_handle,
+ "members": usergroup_members,
+ "is_active": usergroup_is_active,
+ "last_populated": timezone.now().date(),
+ },
+ )
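+
+
+# Illustrative call (hypothetical identifiers): syncing a user group by Slack ID.
+# The classmethod returns update_or_create's (instance, created) pair, (None, False)
+# when the required scope is missing, and falls through to None if the ID is not
+# present in usergroups.list:
+#
+#   usergroup, created = SlackUserGroup.update_or_create_slack_usergroup_from_slack(
+#       "S0123ABCD", slack_team_identity
+#   )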
diff --git a/engine/apps/slack/representatives/__init__.py b/engine/apps/slack/representatives/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/engine/apps/slack/representatives/alert_group_representative.py b/engine/apps/slack/representatives/alert_group_representative.py
new file mode 100644
index 0000000000..2fc26c69b1
--- /dev/null
+++ b/engine/apps/slack/representatives/alert_group_representative.py
@@ -0,0 +1,299 @@
+import logging
+
+from celery.utils.log import get_task_logger
+from django.apps import apps
+from django.conf import settings
+
+from apps.alerts.constants import ActionSource
+from apps.alerts.representative import AlertGroupAbstractRepresentative
+from apps.slack.scenarios.scenario_step import ScenarioStep
+from common.custom_celery_tasks import shared_dedicated_queue_retry_task
+
+logger = get_task_logger(__name__)
+logger.setLevel(logging.DEBUG)
+
+
+@shared_dedicated_queue_retry_task(
+ autoretry_for=(Exception,), retry_backoff=True, max_retries=1 if settings.DEBUG else None
+)
+def on_create_alert_slack_representative_async(alert_pk):
+ """
+    It's async so that Slack downtime cannot delay SMS and other notification destinations.
+ """
+ Alert = apps.get_model("alerts", "Alert")
+ AlertReceiveChannel = apps.get_model("alerts", "AlertReceiveChannel")
+
+ alert = (
+ Alert.objects.filter(pk=alert_pk)
+ .select_related(
+ "group",
+ "group__channel",
+ "group__channel__organization",
+ "group__channel__organization__slack_team_identity",
+ )
+ .get()
+ )
+ logger.debug(f"Start on_create_alert_slack_representative for alert {alert_pk} from alert_group {alert.group_id}")
+
+ # don't need to publish in slack maintenance alert
+ # it was published earlier
+ if alert.group.maintenance_uuid is not None:
+ return
+    # No need to publish alerts in Slack while the integration or its organization is in maintenance mode.
+ if (
+ alert.group.channel.maintenance_mode == AlertReceiveChannel.MAINTENANCE
+        or alert.group.channel.organization.maintenance_mode == AlertReceiveChannel.MAINTENANCE
+ ):
+ return
+
+ organization = alert.group.channel.organization
+ if organization.slack_team_identity:
+ logger.debug(
+ f"Process on_create_alert_slack_representative for alert {alert_pk} from alert_group {alert.group_id}"
+ )
+ AlertShootingStep = ScenarioStep.get_step("distribute_alerts", "AlertShootingStep")
+ step = AlertShootingStep(organization.slack_team_identity, organization)
+ step.process_signal(alert)
+ else:
+ logger.debug(
+ f"Drop on_create_alert_slack_representative for alert {alert_pk} from alert_group {alert.group_id}"
+ )
+ logger.debug(f"Finish on_create_alert_slack_representative for alert {alert_pk} from alert_group {alert.group_id}")
+
+
+@shared_dedicated_queue_retry_task(
+ autoretry_for=(Exception,), retry_backoff=True, max_retries=1 if settings.DEBUG else None
+)
+def on_alert_group_action_triggered_async(log_record_id):
+ AlertGroupLogRecord = apps.get_model("alerts", "AlertGroupLogRecord")
+
+ logger.debug(f"SLACK representative: get log record {log_record_id}")
+
+ log_record = AlertGroupLogRecord.objects.get(pk=log_record_id)
+ alert_group_id = log_record.alert_group_id
+ logger.debug(f"Start on_alert_group_action_triggered for alert_group {alert_group_id}, log record {log_record_id}")
+ instance = AlertGroupSlackRepresentative(log_record)
+ if instance.is_applicable():
+ logger.debug(f"SLACK representative is applicable for alert_group {alert_group_id}, log record {log_record_id}")
+ handler = instance.get_handler()
+ logger.debug(
+ f"Found handler {handler.__name__} in SLACK representative for alert_group {alert_group_id}, "
+ f"log record {log_record_id}"
+ )
+ handler()
+ logger.debug(
+ f"Finish handler {handler.__name__} in SLACK representative for alert_group {alert_group_id}, "
+ f"log record {log_record_id}"
+ )
+ else:
+ logger.debug(
+ f"SLACK representative is NOT applicable for alert_group {alert_group_id}, log record {log_record_id}"
+ )
+ logger.debug(f"Finish on_alert_group_action_triggered for alert_group {alert_group_id}, log record {log_record_id}")
+
+
+@shared_dedicated_queue_retry_task(
+ autoretry_for=(Exception,), retry_backoff=True, max_retries=1 if settings.DEBUG else None
+)
+def on_alert_group_update_log_report_async(alert_group_id):
+ AlertGroup = apps.get_model("alerts", "AlertGroup")
+ alert_group = AlertGroup.all_objects.get(pk=alert_group_id)
+ logger.debug(f"Start on_alert_group_update_log_report for alert_group {alert_group_id}")
+ organization = alert_group.channel.organization
+ if alert_group.slack_message and organization.slack_team_identity:
+ logger.debug(f"Process on_alert_group_update_log_report for alert_group {alert_group_id}")
+ UpdateLogReportMessageStep = ScenarioStep.get_step("distribute_alerts", "UpdateLogReportMessageStep")
+ step = UpdateLogReportMessageStep(organization.slack_team_identity, organization)
+ step.process_signal(alert_group)
+ else:
+ logger.debug(f"Drop on_alert_group_update_log_report for alert_group {alert_group_id}")
+ logger.debug(f"Finish on_alert_group_update_log_report for alert_group {alert_group_id}")
+
+
+class AlertGroupSlackRepresentative(AlertGroupAbstractRepresentative):
+ def __init__(self, log_record):
+ self.log_record = log_record
+
+ def is_applicable(self):
+ slack_message = self.log_record.alert_group.get_slack_message()
+ slack_team_identity = self.log_record.alert_group.channel.organization.slack_team_identity
+ return (
+ slack_message is not None
+ and slack_team_identity is not None
+ and slack_message.slack_team_identity == slack_team_identity
+ )
+
+ @classmethod
+ def on_create_alert(cls, **kwargs):
+ Alert = apps.get_model("alerts", "Alert")
+ alert = kwargs["alert"]
+ if isinstance(alert, Alert):
+ alert_id = alert.pk
+ else:
+ alert_id = alert
+ alert = Alert.objects.get(pk=alert_id)
+
+ logger.debug(
+ f"Received alert_create_signal in SLACK representative for alert {alert_id} "
+ f"from alert_group {alert.group_id}"
+ )
+
+ if alert.group.notify_in_slack_enabled is False:
+ logger.debug(
+ f"Skipping alert with id {alert_id} from alert_group {alert.group_id} since notify_in_slack is disabled"
+ )
+ return
+ on_create_alert_slack_representative_async.apply_async((alert_id,))
+
+ logger.debug(
+ f"Async process alert_create_signal in SLACK representative for alert {alert_id} "
+ f"from alert_group {alert.group_id}"
+ )
+
+ @classmethod
+ def on_alert_group_action_triggered(cls, **kwargs):
+ logger.debug("Received alert_group_action_triggered signal in SLACK representative")
+ AlertGroupLogRecord = apps.get_model("alerts", "AlertGroupLogRecord")
+ log_record = kwargs["log_record"]
+ action_source = kwargs.get("action_source")
+ force_sync = kwargs.get("force_sync", False)
+ if isinstance(log_record, AlertGroupLogRecord):
+ log_record_id = log_record.pk
+ else:
+ log_record_id = log_record
+
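+        # Slack-originated actions (and force_sync) run inline so the Slack message
+        # is updated within the same request; everything else goes through Celery.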
+ if action_source == ActionSource.SLACK or force_sync:
+ on_alert_group_action_triggered_async(log_record_id)
+ else:
+ on_alert_group_action_triggered_async.apply_async((log_record_id,))
+
+ @classmethod
+ def on_alert_group_update_log_report(cls, **kwargs):
+ AlertGroup = apps.get_model("alerts", "AlertGroup")
+ alert_group = kwargs["alert_group"]
+
+ if isinstance(alert_group, AlertGroup):
+ alert_group_id = alert_group.pk
+ else:
+ alert_group_id = alert_group
+ alert_group = AlertGroup.all_objects.get(pk=alert_group_id)
+
+ logger.debug(
+ f"Received alert_group_update_log_report signal in SLACK representative for alert_group {alert_group_id}"
+ )
+
+ if alert_group.notify_in_slack_enabled is False:
+ logger.debug(f"Skipping alert_group {alert_group_id} since notify_in_slack is disabled")
+ return
+
+ on_alert_group_update_log_report_async.apply_async((alert_group_id,))
+
+ @classmethod
+ def on_alert_group_update_resolution_note(cls, **kwargs):
+ alert_group = kwargs["alert_group"]
+ resolution_note = kwargs.get("resolution_note")
+ organization = alert_group.channel.organization
+ logger.debug(
+ f"Received alert_group_update_resolution_note signal in SLACK representative for alert_group {alert_group.pk}"
+ )
+ if alert_group.slack_message and organization.slack_team_identity:
+ UpdateResolutionNoteStep = ScenarioStep.get_step("resolution_note", "UpdateResolutionNoteStep")
+ step = UpdateResolutionNoteStep(organization.slack_team_identity, organization)
+ step.process_signal(alert_group, resolution_note)
+
+ def on_acknowledge(self):
+ AcknowledgeGroupStep = ScenarioStep.get_step("distribute_alerts", "AcknowledgeGroupStep")
+ step = AcknowledgeGroupStep(self.log_record.alert_group.channel.organization.slack_team_identity)
+ step.process_signal(self.log_record)
+
+ def on_un_acknowledge(self):
+ UnAcknowledgeGroupStep = ScenarioStep.get_step("distribute_alerts", "UnAcknowledgeGroupStep")
+ step = UnAcknowledgeGroupStep(self.log_record.alert_group.channel.organization.slack_team_identity)
+ step.process_signal(self.log_record)
+
+ def on_resolve(self):
+ ResolveGroupStep = ScenarioStep.get_step("distribute_alerts", "ResolveGroupStep")
+ step = ResolveGroupStep(self.log_record.alert_group.channel.organization.slack_team_identity)
+ step.process_signal(self.log_record)
+
+ def on_un_resolve(self):
+ UnResolveGroupStep = ScenarioStep.get_step("distribute_alerts", "UnResolveGroupStep")
+ step = UnResolveGroupStep(self.log_record.alert_group.channel.organization.slack_team_identity)
+ step.process_signal(self.log_record)
+
+ def on_attach(self):
+ AttachGroupStep = ScenarioStep.get_step("distribute_alerts", "AttachGroupStep")
+ step = AttachGroupStep(self.log_record.alert_group.channel.organization.slack_team_identity)
+ step.process_signal(self.log_record)
+
+ def on_fail_attach(self):
+ AttachGroupStep = ScenarioStep.get_step("distribute_alerts", "AttachGroupStep")
+ step = AttachGroupStep(self.log_record.alert_group.channel.organization.slack_team_identity)
+ step.process_signal(self.log_record)
+
+ def on_un_attach(self):
+ UnAttachGroupStep = ScenarioStep.get_step("distribute_alerts", "UnAttachGroupStep")
+ step = UnAttachGroupStep(self.log_record.alert_group.channel.organization.slack_team_identity)
+ step.process_signal(self.log_record)
+
+ def on_silence(self):
+ SilenceGroupStep = ScenarioStep.get_step("distribute_alerts", "SilenceGroupStep")
+ step = SilenceGroupStep(self.log_record.alert_group.channel.organization.slack_team_identity)
+ step.process_signal(self.log_record)
+
+ def on_un_silence(self):
+ UnSilenceGroupStep = ScenarioStep.get_step("distribute_alerts", "UnSilenceGroupStep")
+ step = UnSilenceGroupStep(self.log_record.alert_group.channel.organization.slack_team_identity)
+ step.process_signal(self.log_record)
+
+ def on_invite(self):
+ InviteOtherPersonToIncident = ScenarioStep.get_step("distribute_alerts", "InviteOtherPersonToIncident")
+ step = InviteOtherPersonToIncident(self.log_record.alert_group.channel.organization.slack_team_identity)
+ step.process_signal(self.log_record)
+
+ def on_re_invite(self):
+ self.on_invite()
+
+ def on_un_invite(self):
+ StopInvitationProcess = ScenarioStep.get_step("distribute_alerts", "StopInvitationProcess")
+ step = StopInvitationProcess(self.log_record.alert_group.channel.organization.slack_team_identity)
+ step.process_signal(self.log_record)
+
+ def on_auto_un_acknowledge(self):
+ self.on_un_acknowledge()
+
+ def on_ack_reminder_triggered(self):
+ AcknowledgeConfirmationStep = ScenarioStep.get_step("distribute_alerts", "AcknowledgeConfirmationStep")
+ step = AcknowledgeConfirmationStep(self.log_record.alert_group.channel.organization.slack_team_identity)
+ step.process_signal(self.log_record)
+
+ def on_custom_button_triggered(self):
+ CustomButtonProcessStep = ScenarioStep.get_step("distribute_alerts", "CustomButtonProcessStep")
+ step = CustomButtonProcessStep(self.log_record.alert_group.channel.organization.slack_team_identity)
+ step.process_signal(self.log_record)
+
+ def on_wiped(self):
+ WipeGroupStep = ScenarioStep.get_step("distribute_alerts", "WipeGroupStep")
+ step = WipeGroupStep(self.log_record.alert_group.channel.organization.slack_team_identity)
+ step.process_signal(self.log_record)
+
+ def on_deleted(self):
+ DeleteGroupStep = ScenarioStep.get_step("distribute_alerts", "DeleteGroupStep")
+ step = DeleteGroupStep(self.log_record.alert_group.channel.organization.slack_team_identity)
+ step.process_signal(self.log_record)
+
+ def get_handler(self):
+ handler_name = self.get_handler_name()
+ if hasattr(self, handler_name):
+ handler = getattr(self, handler_name)
+ else:
+ handler = self.on_handler_not_found
+
+ return handler
+
+ def get_handler_name(self):
+ return self.HANDLER_PREFIX + self.get_handlers_map()[self.log_record.type]
+
+ @classmethod
+ def on_handler_not_found(cls):
+ pass
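+
+
+# Dispatch sketch: handler names come from the abstract representative's map
+# (HANDLER_PREFIX and get_handlers_map() are defined on
+# AlertGroupAbstractRepresentative, not in this file; the prefix is presumably
+# "on_"). For a log record whose type maps to "acknowledge":
+#
+#   handler = AlertGroupSlackRepresentative(log_record).get_handler()
+#   handler()  # resolves to on_acknowledge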
diff --git a/engine/apps/slack/representatives/user_representative.py b/engine/apps/slack/representatives/user_representative.py
new file mode 100644
index 0000000000..5ca6389028
--- /dev/null
+++ b/engine/apps/slack/representatives/user_representative.py
@@ -0,0 +1,47 @@
+from apps.slack.scenarios.scenario_step import ScenarioStep
+from apps.user_management.user_representative import UserAbstractRepresentative
+
+
+class UserSlackRepresentative(UserAbstractRepresentative):
+ def __init__(self, log_record):
+ self.log_record = log_record
+
+ def is_applicable(self):
+ return (
+ self.log_record.alert_group.slack_message is not None
+ and self.log_record.alert_group.channel.organization.slack_team_identity is not None
+ )
+
+ @classmethod
+ def on_user_action_triggered(cls, **kwargs):
+ log_record = kwargs["log_record"]
+ instance = cls(log_record)
+ if instance.is_applicable():
+ handler_name = instance.get_handler_name()
+ if hasattr(instance, handler_name):
+ handler = getattr(instance, handler_name)
+ handler()
+ else:
+ cls.on_handler_not_found()
+
+ def on_triggered(self):
+ NotificationDeliveryStep = ScenarioStep.get_step("notification_delivery", "NotificationDeliveryStep")
+ step = NotificationDeliveryStep(self.log_record.alert_group.channel.organization.slack_team_identity)
+ step.process_signal(self.log_record)
+
+ def on_failed(self):
+ NotificationDeliveryStep = ScenarioStep.get_step("notification_delivery", "NotificationDeliveryStep")
+ step = NotificationDeliveryStep(self.log_record.alert_group.channel.organization.slack_team_identity)
+ step.process_signal(self.log_record)
+
+ def on_success(self):
+ NotificationDeliveryStep = ScenarioStep.get_step("notification_delivery", "NotificationDeliveryStep")
+ step = NotificationDeliveryStep(self.log_record.alert_group.channel.organization.slack_team_identity)
+ step.process_signal(self.log_record)
+
+ def get_handler_name(self):
+ return self.HANDLER_PREFIX + self.get_handlers_map()[self.log_record.type]
+
+ @classmethod
+ def on_handler_not_found(cls):
+ pass
diff --git a/engine/apps/slack/scenarios/__init__.py b/engine/apps/slack/scenarios/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/engine/apps/slack/scenarios/alertgroup_appearance.py b/engine/apps/slack/scenarios/alertgroup_appearance.py
new file mode 100644
index 0000000000..edf0a70423
--- /dev/null
+++ b/engine/apps/slack/scenarios/alertgroup_appearance.py
@@ -0,0 +1,357 @@
+import json
+
+from django.apps import apps
+from django.db import transaction
+from jinja2 import TemplateSyntaxError
+from rest_framework.response import Response
+
+from apps.slack.scenarios import scenario_step
+from apps.user_management.organization_log_creator import OrganizationLogType, create_organization_log
+from common.constants.role import Role
+from common.jinja_templater import jinja_template_env
+
+from .step_mixins import CheckAlertIsUnarchivedMixin, IncidentActionsAccessControlMixin
+
+
+class OpenAlertAppearanceDialogStep(
+ CheckAlertIsUnarchivedMixin, IncidentActionsAccessControlMixin, scenario_step.ScenarioStep
+):
+
+ tags = [
+ scenario_step.ScenarioStep.TAG_INCIDENT_ROUTINE,
+ ]
+
+ ALLOWED_ROLES = [Role.ADMIN]
+ ACTION_VERBOSE = "open Alert Appearance"
+
+ def process_scenario(self, slack_user_identity, slack_team_identity, payload, action=None):
+ AlertGroup = apps.get_model("alerts", "AlertGroup")
+ AlertReceiveChannel = apps.get_model("alerts", "AlertReceiveChannel")
+
+ try:
+ message_ts = payload["message_ts"]
+ except KeyError:
+ message_ts = payload["container"]["message_ts"]
+
+ try:
+ alert_group_pk = payload["actions"][0]["action_id"].split("_")[1]
+ except (KeyError, IndexError):
+ value = json.loads(payload["actions"][0]["value"])
+ alert_group_pk = value["alert_group_pk"]
+
+ alert_group = AlertGroup.all_objects.get(pk=alert_group_pk)
+ if not self.check_alert_is_unarchived(slack_team_identity, payload, alert_group):
+ return
+ blocks = []
+
+ private_metadata = {
+ "organization_id": self.organization.pk if self.organization else alert_group.organization.pk,
+ "alert_group_pk": alert_group_pk,
+ "message_ts": message_ts,
+ }
+
+ integration = alert_group.channel.integration
+
+ PAYLOAD_TEXT_SIZE = 3000
+ raw_request_data = json.dumps(alert_group.alerts.first().raw_request_data, sort_keys=True, indent=4)
+
+        # Special case: Amazon SNS notifications may arrive as a plain string rather than JSON
+ if alert_group.channel.integration == AlertReceiveChannel.INTEGRATION_AMAZON_SNS and raw_request_data == "{}":
+ raw_request_data = alert_group.alerts.first().message
+
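+        # Split the payload into PAYLOAD_TEXT_SIZE chunks so each input block stays
+        # within Slack's text limits for a single field.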
+ raw_request_data_chunks = [
+ raw_request_data[i : i + PAYLOAD_TEXT_SIZE] for i in range(0, len(raw_request_data), PAYLOAD_TEXT_SIZE)
+ ]
+ for idx, chunk in enumerate(raw_request_data_chunks):
+ block = {
+ "type": "input",
+ "block_id": f"payload_{idx}",
+ "label": {
+ "type": "plain_text",
+ "text": f"Payload (Part {idx + 1}):" if len(raw_request_data_chunks) > 1 else "Payload (Readonly)",
+ },
+ "element": {
+ "type": "plain_text_input",
+ "placeholder": {
+ "type": "plain_text",
+ "text": "Payload of the current alert",
+ },
+ "action_id": UpdateAppearanceStep.routing_uid(),
+ "multiline": True,
+ },
+ "optional": True,
+                "hint": {"type": "plain_text", "text": "This is an example payload of the first alert in the group"},
+ }
+ block["element"]["initial_value"] = chunk
+ blocks.append(block)
+ blocks.append({"type": "divider"})
+
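+        # Build one editable template input per notification channel and attribute,
+        # pre-filled with the channel's own template or the integration default.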
+ for notification_channel in ["slack", "web", "sms", "phone_call", "email", "telegram"]:
+ blocks.append(
+ {
+ "type": "header",
+ "text": {
+ "type": "plain_text",
+ "text": f"{notification_channel.replace('_', ' ').title()} Templates",
+ "emoji": True,
+ },
+ }
+ )
+ for templatizable_attr in ["title", "message", "image_url"]:
+ try:
+ attr = getattr(alert_group.channel, f"{notification_channel}_{templatizable_attr}_template")
+ except AttributeError:
+ continue
+ block = {
+ "type": "input",
+ "block_id": f"{notification_channel}_{templatizable_attr}_template",
+ "label": {
+ "type": "plain_text",
+ "text": f"{notification_channel.capitalize()} {templatizable_attr}:",
+ },
+ "element": {
+ "type": "plain_text_input",
+ "placeholder": {"type": "plain_text", "text": f"{{{{ payload.{templatizable_attr} }}}}"},
+ "action_id": UpdateAppearanceStep.routing_uid(),
+ "multiline": True,
+ },
+ "optional": True,
+ "hint": {
+ "type": "plain_text",
+ "text": "Jinja2 template",
+ },
+ }
+ if attr is not None:
+ block["element"]["initial_value"] = attr
+ else:
+ default_values = getattr(
+ AlertReceiveChannel,
+ f"INTEGRATION_TO_DEFAULT_{notification_channel.upper()}_{templatizable_attr.upper()}_TEMPLATE",
+ None,
+ )
+ if default_values is not None:
+ default_value = default_values.get(integration)
+ if default_value is not None:
+ block["element"]["initial_value"] = default_value
+ blocks.append(block)
+ blocks.append({"type": "divider"})
+
+ common_templates_meta_data = {
+        "source_link": {"placeholder": "{{ payload.link_to_upstream_details }}", "hint": "Jinja2 template"},
+ "grouping_id": {"placeholder": "{{ payload.uid }}", "hint": "Jinja2 template"},
+ "resolve_condition": {
+ "placeholder": '{{ 1 if payload.state == "OK" else 0 }}',
+ "hint": "This Jinja2 template should output one of the following values: ok, true, 1 (case insensitive)",
+ },
+ "acknowledge_condition": {
+ "placeholder": '{{ 1 if payload.state == "OK" else 0 }}',
+ "hint": "This Jinja2 template should output one of the following values: ok, true, 1 (case insensitive)",
+ },
+ }
+
+ for common_template in common_templates_meta_data.keys():
+ try:
+ attr = getattr(alert_group.channel, f"{common_template}_template")
+ except AttributeError:
+ continue
+
+ block = {
+ "type": "input",
+ "block_id": f"{common_template}_template",
+ "label": {
+ "type": "plain_text",
+ "text": f"{common_template.capitalize().replace('_', ' ')}:",
+ },
+ "element": {
+ "type": "plain_text_input",
+ "placeholder": {
+ "type": "plain_text",
+ "text": common_templates_meta_data[common_template]["placeholder"],
+ },
+ "action_id": UpdateAppearanceStep.routing_uid(),
+ "multiline": True,
+ },
+ "optional": True,
+ "hint": {
+ "type": "plain_text",
+ "text": common_templates_meta_data[common_template]["hint"],
+ },
+ }
+ if attr is not None:
+ block["element"]["initial_value"] = attr
+ else:
+ default_values = getattr(
+ AlertReceiveChannel, f"INTEGRATION_TO_DEFAULT_{common_template.upper()}_TEMPLATE", None
+ )
+ if default_values is not None:
+ default_value = default_values.get(integration)
+ if default_value:
+ block["element"]["initial_value"] = default_value
+ blocks.append(block)
+
+ view = {
+ "callback_id": UpdateAppearanceStep.routing_uid(),
+ "blocks": blocks,
+ "type": "modal",
+ "title": {
+ "type": "plain_text",
+ "text": "Incident template",
+ },
+ "submit": {
+ "type": "plain_text",
+ "text": "Submit",
+ },
+ "private_metadata": json.dumps(private_metadata),
+ }
+
+ self._slack_client.api_call(
+ "views.open",
+ trigger_id=payload["trigger_id"],
+ view=view,
+ )
+
+
+class UpdateAppearanceStep(scenario_step.ScenarioStep):
+
+ tags = [
+ scenario_step.ScenarioStep.TAG_INCIDENT_ROUTINE,
+ ]
+
+ def process_scenario(self, slack_user_identity, slack_team_identity, payload, action=None):
+ AlertGroup = apps.get_model("alerts", "AlertGroup")
+ AlertReceiveChannel = apps.get_model("alerts", "AlertReceiveChannel")
+
+ private_metadata = json.loads(payload["view"]["private_metadata"])
+ alert_group_pk = private_metadata["alert_group_pk"]
+ payload_values = payload["view"]["state"]["values"]
+
+ with transaction.atomic():
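+            # Lock the alert group row until the transaction commits so concurrent
+            # template edits don't clobber each other.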
+ alert_group = AlertGroup.all_objects.filter(pk=alert_group_pk).select_for_update().get()
+ integration = alert_group.channel.integration
+ alert_receive_channel = alert_group.channel
+ old_state = alert_receive_channel.repr_settings_for_client_side_logging
+
+ for templatizable_attr in ["title", "message", "image_url"]:
+ for notification_channel in ["slack", "web", "sms", "phone_call", "email", "telegram"]:
+ attr_name = f"{notification_channel}_{templatizable_attr}_template"
+ try:
+ old_value = getattr(alert_receive_channel, attr_name)
+ except AttributeError:
+ continue
+ new_value = payload_values[attr_name][self.routing_uid()].get("value")
+
+ if new_value is None and old_value is not None:
+ setattr(alert_receive_channel, attr_name, None)
+ alert_receive_channel.save()
+ # Drop caches for current alert group
+ if notification_channel == "web":
+ setattr(alert_group, f"cached_render_for_web_{templatizable_attr}", None)
+ alert_group.save()
+ elif new_value is not None:
+ default_values = getattr(
+ AlertReceiveChannel,
+ f"INTEGRATION_TO_DEFAULT_{notification_channel.upper()}_{templatizable_attr.upper()}_TEMPLATE",
+ None,
+ )
+ if default_values is not None:
+ default_value = default_values.get(integration)
+
+ try:
+ if default_value is None or new_value.strip() != default_value.strip():
+ jinja_template_env.from_string(new_value)
+ setattr(alert_receive_channel, attr_name, new_value)
+ alert_receive_channel.save()
+ # Drop caches for current alert group
+ if notification_channel == "web":
+ setattr(alert_group, f"cached_render_for_web_{templatizable_attr}", None)
+ alert_group.save()
+ elif default_value is not None and new_value.strip() == default_value.strip():
+ new_value = None
+ setattr(alert_receive_channel, attr_name, new_value)
+ alert_receive_channel.save()
+ # Drop caches for current alert group
+ if notification_channel == "web":
+ setattr(alert_group, f"cached_render_for_web_{templatizable_attr}", None)
+ alert_group.save()
+ except TemplateSyntaxError:
+ return Response(
+                                {"response_action": "errors", "errors": {attr_name: "Template has an invalid format"}},
+ headers={"content-type": "application/json"},
+ )
+
+ common_templates = ["source_link", "grouping_id", "resolve_condition", "acknowledge_condition"]
+ for common_template in common_templates:
+ attr_name = f"{common_template}_template"
+ try:
+ old_value = getattr(alert_receive_channel, attr_name)
+ except AttributeError:
+ continue
+ new_value = payload_values[attr_name][self.routing_uid()].get("value")
+
+ if new_value is None and old_value is not None:
+ setattr(alert_receive_channel, attr_name, None)
+ alert_receive_channel.save()
+ alert_group.save()
+ elif new_value is not None:
+ default_values = getattr(
+ AlertReceiveChannel, f"INTEGRATION_TO_DEFAULT_{common_template.upper()}_TEMPLATE", None
+ )
+ if default_values is not None:
+ default_value = default_values.get(integration)
+
+ try:
+ if default_value is None or new_value.strip() != default_value.strip():
+ jinja_template_env.from_string(new_value)
+ setattr(alert_receive_channel, attr_name, new_value)
+ alert_receive_channel.save()
+ alert_group.save()
+ elif default_value is not None and new_value.strip() == default_value.strip():
+ new_value = None
+ setattr(alert_receive_channel, attr_name, new_value)
+ alert_receive_channel.save()
+ alert_group.save()
+ except TemplateSyntaxError:
+ return Response(
+                            {"response_action": "errors", "errors": {common_template: "Template has an invalid format"}},
+ headers={"content-type": "application/json"},
+ )
+
+ new_state = alert_receive_channel.repr_settings_for_client_side_logging
+
+ if new_state != old_state:
+            description = f"Integration settings were changed from:\n{old_state}\nto:\n{new_state}"
+ create_organization_log(
+ self.organization, self.user, OrganizationLogType.TYPE_INTEGRATION_CHANGED, description
+ )
+
+ attachments = alert_group.render_slack_attachments()
+ blocks = alert_group.render_slack_blocks()
+
+ self._slack_client.api_call(
+ "chat.update",
+ channel=alert_group.slack_message.channel_id,
+ ts=alert_group.slack_message.slack_id,
+ attachments=attachments,
+ blocks=blocks,
+ )
+
+
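+# Routing table: maps incoming Slack payload types and action/callback ids to the
+# scenario steps that handle them.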
+STEPS_ROUTING = [
+ {
+ "payload_type": scenario_step.PAYLOAD_TYPE_INTERACTIVE_MESSAGE,
+ "action_type": scenario_step.ACTION_TYPE_BUTTON,
+ "action_name": OpenAlertAppearanceDialogStep.routing_uid(),
+ "step": OpenAlertAppearanceDialogStep,
+ },
+ {
+ "payload_type": scenario_step.PAYLOAD_TYPE_BLOCK_ACTIONS,
+ "block_action_type": scenario_step.BLOCK_ACTION_TYPE_BUTTON,
+ "block_action_id": OpenAlertAppearanceDialogStep.routing_uid(),
+ "step": OpenAlertAppearanceDialogStep,
+ },
+ {
+ "payload_type": scenario_step.PAYLOAD_TYPE_VIEW_SUBMISSION,
+ "view_callback_id": UpdateAppearanceStep.routing_uid(),
+ "step": UpdateAppearanceStep,
+ },
+]
diff --git a/engine/apps/slack/scenarios/distribute_alerts.py b/engine/apps/slack/scenarios/distribute_alerts.py
new file mode 100644
index 0000000000..8933e71776
--- /dev/null
+++ b/engine/apps/slack/scenarios/distribute_alerts.py
@@ -0,0 +1,1333 @@
+import json
+import logging
+from contextlib import suppress
+from datetime import datetime
+
+from django.apps import apps
+from django.core.cache import cache
+from django.utils import timezone
+from jinja2 import TemplateError
+
+from apps.alerts.constants import ActionSource
+from apps.alerts.incident_appearance.renderers.constants import DEFAULT_BACKUP_TITLE
+from apps.alerts.incident_appearance.renderers.slack_renderer import AlertSlackRenderer
+from apps.alerts.models import AlertGroup, AlertGroupLogRecord, AlertReceiveChannel, Invitation
+from apps.alerts.tasks import custom_button_result
+from apps.alerts.utils import render_curl_command
+from apps.slack.constants import CACHE_UPDATE_INCIDENT_SLACK_MESSAGE_LIFETIME, SLACK_RATE_LIMIT_DELAY
+from apps.slack.scenarios import scenario_step
+from apps.slack.scenarios.slack_renderer import AlertGroupLogSlackRenderer
+from apps.slack.slack_client import SlackClientWithErrorHandling
+from apps.slack.slack_client.exceptions import (
+ SlackAPIChannelArchivedException,
+ SlackAPIException,
+ SlackAPIRateLimitException,
+ SlackAPITokenException,
+)
+from apps.slack.slack_formatter import SlackFormatter
+from apps.slack.tasks import (
+ post_or_update_log_report_message_task,
+ send_message_to_thread_if_bot_not_in_channel,
+ update_incident_slack_message,
+)
+from apps.slack.utils import get_cache_key_update_incident_slack_message
+from common.constants.role import Role
+from common.utils import clean_markup, is_string_with_visible_characters
+
+from .step_mixins import CheckAlertIsUnarchivedMixin, IncidentActionsAccessControlMixin
+
+logger = logging.getLogger(__name__)
+logger.setLevel(logging.DEBUG)
+
+
+class AlertShootingStep(scenario_step.ScenarioStep):
+
+ tags = [
+ scenario_step.ScenarioStep.TAG_TRIGGERED_BY_SYSTEM,
+ ]
+
+ def publish_slack_messages(self, slack_team_identity, alert_group, alert, attachments, channel_id, blocks):
+ SlackMessage = apps.get_model("slack", "SlackMessage")
+ # channel_id can be None if general log channel for slack_team_identity is not set
+ if channel_id is None:
+ logger.info(f"Failed to post message to Slack for alert_group {alert_group.pk} because channel_id is None")
+ alert_group.reason_to_skip_escalation = AlertGroup.CHANNEL_NOT_SPECIFIED
+ alert_group.save(update_fields=["reason_to_skip_escalation"])
+            print("Not delivering alert because channel_id is None.")
+ return
+
+ try:
+ result = self._slack_client.api_call(
+ "chat.postMessage", channel=channel_id, attachments=attachments, blocks=blocks
+ )
+
+ slack_message = SlackMessage.objects.create(
+ slack_id=result["ts"],
+ organization=alert_group.channel.organization,
+ _slack_team_identity=slack_team_identity,
+ channel_id=channel_id,
+ alert_group=alert_group,
+ )
+
+ alert_group.slack_message = slack_message
+ alert_group.save(update_fields=["slack_message"])
+
+ # If alert was made out of a message:
+ if alert_group.channel.integration == AlertReceiveChannel.INTEGRATION_SLACK_CHANNEL:
+ channel = json.loads(alert.integration_unique_data)["channel"]
+ result = self._slack_client.api_call(
+ "chat.postMessage",
+ channel=channel,
+ thread_ts=json.loads(alert.integration_unique_data)["ts"],
+ text=":rocket: <{}|Incident registered!>".format(alert_group.slack_message.permalink),
+ team=slack_team_identity,
+ )
+ SlackMessage(
+ slack_id=result["ts"],
+ organization=alert_group.channel.organization,
+ _slack_team_identity=self.slack_team_identity,
+ channel_id=channel,
+ alert_group=alert_group,
+ ).save()
+
+ alert.delivered = True
+ except SlackAPITokenException:
+ alert_group.reason_to_skip_escalation = AlertGroup.ACCOUNT_INACTIVE
+ alert_group.save(update_fields=["reason_to_skip_escalation"])
+ print("Not delivering alert due to account_inactive.")
+ except SlackAPIChannelArchivedException:
+ alert_group.reason_to_skip_escalation = AlertGroup.CHANNEL_ARCHIVED
+ alert_group.save(update_fields=["reason_to_skip_escalation"])
+            print("Not delivering alert because the channel is archived.")
+ except SlackAPIRateLimitException as e:
+ # don't rate limit maintenance alert
+ if alert_group.channel.integration != AlertReceiveChannel.INTEGRATION_MAINTENANCE:
+ alert_group.reason_to_skip_escalation = AlertGroup.RATE_LIMITED
+ alert_group.save(update_fields=["reason_to_skip_escalation"])
+ delay = e.response.get("rate_limit_delay") or SLACK_RATE_LIMIT_DELAY
+ alert_group.channel.start_send_rate_limit_message_task(delay)
+ print("Not delivering alert due to slack rate limit.")
+ else:
+ raise e
+ except SlackAPIException as e:
+ # TODO: slack-onprem check exceptions
+ if e.response["error"] == "channel_not_found":
+ alert_group.reason_to_skip_escalation = AlertGroup.CHANNEL_ARCHIVED
+ alert_group.save(update_fields=["reason_to_skip_escalation"])
+                print("Not delivering alert because the channel is archived.")
+ elif e.response["error"] == "restricted_action":
+                # workspace settings prevent the bot from posting messages (e.g. the bot is not a full member)
+ alert_group.reason_to_skip_escalation = AlertGroup.RESTRICTED_ACTION
+ alert_group.save(update_fields=["reason_to_skip_escalation"])
+ print("Not delivering alert due to workspace restricted action.")
+ else:
+ raise e
+ finally:
+ alert.save()
+
+ def process_signal(self, alert):
+ # do not try to post alert group message to slack if its channel is rate limited
+ if alert.group.channel.is_rate_limited_in_slack:
+ logger.info("Skip posting or updating alert_group in Slack due to rate limit")
+ AlertGroup.all_objects.filter(
+ pk=alert.group.pk,
+ slack_message_sent=False,
+ ).update(slack_message_sent=True, reason_to_skip_escalation=AlertGroup.RATE_LIMITED)
+ return
+
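+        # Atomic compare-and-set: update() reports the number of affected rows, so
+        # exactly one concurrent worker wins the right to post the first message.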
+ num_updated_rows = AlertGroup.all_objects.filter(pk=alert.group.pk, slack_message_sent=False).update(
+ slack_message_sent=True
+ )
+
+ if num_updated_rows == 1:
+ try:
+ channel_id = alert.group.channel_filter.slack_channel_id_or_general_log_id
+ self._send_first_alert(alert, channel_id)
+ except SlackAPIException as e:
+ AlertGroup.all_objects.filter(pk=alert.group.pk).update(slack_message_sent=False)
+ raise e
+
+ is_debug_mode = (
+ alert.group.channel.maintenance_mode is not None
+ or alert.group.channel.organization.maintenance_mode is not None
+ )
+ if is_debug_mode:
+ self._send_debug_mode_notice(alert.group, channel_id)
+ else:
+ # check if alert group was posted to slack before posting message to thread
+ if not alert.group.skip_escalation_in_slack:
+ self._send_thread_messages(alert.group, channel_id)
+ else:
+ # check if alert group was posted to slack before updating its message
+ if not alert.group.skip_escalation_in_slack:
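+                    # Debounce: schedule the update with a delay and cache the task id,
+                    # presumably so a later alert can replace this pending update.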
+ update_task_id = update_incident_slack_message.apply_async(
+ (self.slack_team_identity.pk, alert.group.pk),
+ countdown=10,
+ )
+ cache.set(
+ get_cache_key_update_incident_slack_message(alert.group.pk),
+ update_task_id,
+ timeout=CACHE_UPDATE_INCIDENT_SLACK_MESSAGE_LIFETIME,
+ )
+ else:
+ logger.info("Skip updating alert_group in Slack due to rate limit")
+
+ def _send_first_alert(self, alert, channel_id):
+ attachments = alert.group.render_slack_attachments()
+ blocks = alert.group.render_slack_blocks()
+
+ self.publish_slack_messages(
+ slack_team_identity=self.slack_team_identity,
+ alert_group=alert.group,
+ alert=alert,
+ attachments=attachments,
+ channel_id=channel_id,
+ blocks=blocks,
+ )
+
+ def _send_debug_mode_notice(self, alert_group, channel_id):
+ blocks = []
+ text = "Escalations are silenced due to Debug mode"
+ blocks.append({"type": "section", "text": {"type": "mrkdwn", "text": text}})
+ self._slack_client.api_call(
+ "chat.postMessage",
+ channel=channel_id,
+ attachments=[],
+ thread_ts=alert_group.slack_message.slack_id,
+ mrkdwn=True,
+ blocks=blocks,
+ )
+
+ def _send_thread_messages(self, alert_group, channel_id):
+ post_or_update_log_report_message_task.apply_async(
+ (alert_group.pk, self.slack_team_identity.pk),
+ )
+
+ send_message_to_thread_if_bot_not_in_channel.apply_async(
+ (alert_group.pk, self.slack_team_identity.pk, channel_id),
+            countdown=1,  # delay the message so that the log report is published first
+ )
+
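+    # This step is driven by process_signal (system-triggered), so there is no
+    # interactive Slack scenario to handle here.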
+ def process_scenario(self, slack_user_identity, slack_team_identity, alert, payload=None):
+ pass
+
+
+class InviteOtherPersonToIncident(
+ CheckAlertIsUnarchivedMixin,
+ IncidentActionsAccessControlMixin,
+ scenario_step.ScenarioStep,
+):
+
+ tags = [
+ scenario_step.ScenarioStep.TAG_INCIDENT_ROUTINE,
+ ]
+
+ ALLOWED_ROLES = [Role.ADMIN, Role.EDITOR]
+ ACTION_VERBOSE = "invite to incident"
+
+ def process_scenario(self, slack_user_identity, slack_team_identity, payload, action=None):
+ User = apps.get_model("user_management", "User")
+
+ alert_group = self.get_alert_group_from_slack_message(payload)
+ selected_user = None
+
+ if not self.check_alert_is_unarchived(slack_team_identity, payload, alert_group):
+ return
+
+ try:
+ # user selection
+ selected_user_id = json.loads(payload["actions"][0]["selected_option"]["value"])["user_id"]
+ if selected_user_id is not None: # None if there are no users to select
+ selected_user = User.objects.get(pk=selected_user_id)
+ except (KeyError, json.JSONDecodeError):
+ # for old version with user slack_id selection
+            warning_text = "Oops! Something went wrong, please try again"
+ self.open_warning_window(payload, warning_text)
+ if selected_user is not None:
+ Invitation.invite_user(selected_user, alert_group, self.user)
+ else:
+ self._update_slack_message(alert_group)
+
+ def process_signal(self, log_record):
+ alert_group = log_record.alert_group
+ self._update_slack_message(alert_group)
+
+
+class SilenceGroupStep(
+ CheckAlertIsUnarchivedMixin,
+ IncidentActionsAccessControlMixin,
+ scenario_step.ScenarioStep,
+):
+
+ tags = [
+ scenario_step.ScenarioStep.TAG_INCIDENT_ROUTINE,
+ ]
+
+ ALLOWED_ROLES = [Role.ADMIN, Role.EDITOR]
+ ACTION_VERBOSE = "silence incident"
+
+ def process_scenario(self, slack_user_identity, slack_team_identity, payload, action=None):
+
+ try:
+ silence_delay = int(payload["actions"][0]["selected_options"][0]["value"])
+ except KeyError:
+ silence_delay = int(payload["actions"][0]["selected_option"]["value"])
+
+ alert_group = self.get_alert_group_from_slack_message(payload)
+
+ if self.check_alert_is_unarchived(slack_team_identity, payload, alert_group):
+ alert_group.silence_by_user(self.user, silence_delay, action_source=ActionSource.SLACK)
+
+ def process_signal(self, log_record):
+ alert_group = log_record.alert_group
+ self._update_slack_message(alert_group)
+
+
+class UnSilenceGroupStep(
+ CheckAlertIsUnarchivedMixin,
+ IncidentActionsAccessControlMixin,
+ scenario_step.ScenarioStep,
+):
+
+ tags = [
+ scenario_step.ScenarioStep.TAG_INCIDENT_ROUTINE,
+ ]
+
+ ALLOWED_ROLES = [Role.ADMIN, Role.EDITOR]
+ ACTION_VERBOSE = "unsilence incident"
+
+ def process_scenario(self, slack_user_identity, slack_team_identity, payload, action=None):
+
+ alert_group = self.get_alert_group_from_slack_message(payload)
+ if self.check_alert_is_unarchived(slack_team_identity, payload, alert_group):
+ alert_group.un_silence_by_user(self.user, action_source=ActionSource.SLACK)
+
+ def process_signal(self, log_record):
+ alert_group = log_record.alert_group
+ self._update_slack_message(alert_group)
+
+
+class SelectAttachGroupStep(
+ CheckAlertIsUnarchivedMixin,
+ IncidentActionsAccessControlMixin,
+ scenario_step.ScenarioStep,
+):
+
+ tags = [
+ scenario_step.ScenarioStep.TAG_INCIDENT_ROUTINE,
+ ]
+
+ ALLOWED_ROLES = [Role.ADMIN, Role.EDITOR]
+    ACTION_VERBOSE = "select incident to attach to"
+
+ def process_scenario(self, slack_user_identity, slack_team_identity, payload, action=None):
+ AlertGroup = apps.get_model("alerts", "AlertGroup")
+ value = json.loads(payload["actions"][0]["value"])
+ alert_group_pk = value.get("alert_group_pk")
+ alert_group = AlertGroup.all_objects.get(pk=alert_group_pk)
+
+ if not self.check_alert_is_unarchived(slack_team_identity, payload, alert_group):
+ return
+ blocks = []
+ view = {
+ "callback_id": AttachGroupStep.routing_uid(),
+ "blocks": blocks,
+ "type": "modal",
+ "title": {
+ "type": "plain_text",
+ "text": "Attach to Incident",
+ },
+ "private_metadata": json.dumps(
+ {
+ "organization_id": self.organization.pk if self.organization else alert_group.organization.pk,
+ "alert_group_pk": alert_group_pk,
+ }
+ ),
+ "close": {"type": "plain_text", "text": "Cancel", "emoji": True},
+ }
+ attached_incidents_exists = alert_group.dependent_alert_groups.exists()
+ if attached_incidents_exists:
+ attached_incidents = alert_group.dependent_alert_groups.all()
+ text = (
+ f"Oops! This incident cannot be attached to another one because it already has "
+ f"attached incidents ({attached_incidents.count()}):\n"
+ )
+ for dependent_alert in attached_incidents:
+ if dependent_alert.permalink:
+ dependent_alert_text = (
+ f"\n<{dependent_alert.permalink}|{dependent_alert.long_verbose_name_without_formatting}>"
+ )
+ else:
+ dependent_alert_text = f"\n{dependent_alert.long_verbose_name}"
+                if len(dependent_alert_text + text) <= 2995:  # max 3000 characters
+ text += dependent_alert_text
+ else:
+ text += "\n..."
+ break
+ blocks.append(
+ {
+ "type": "section",
+ "text": {
+ "type": "mrkdwn",
+ "text": text,
+ },
+ }
+ )
+ else:
+ blocks.extend(self.get_select_incidents_blocks(alert_group))
+ if blocks:
+ view["submit"] = {
+ "type": "plain_text",
+ "text": "Submit",
+ }
+ else:
+ blocks.append(
+ {
+ "type": "section",
+ "text": {
+ "type": "mrkdwn",
+                        "text": "Oops! There are no incidents available to attach.",
+ },
+ }
+ )
+ self._slack_client.api_call(
+ "views.open",
+ trigger_id=payload["trigger_id"],
+ view=view,
+ )
+
+ def get_select_incidents_blocks(self, alert_group):
+ collected_options = []
+ blocks = []
+
+ alert_receive_channel_ids = AlertReceiveChannel.objects.filter(
+ organization=alert_group.channel.organization
+ ).values_list("id", flat=True)
+
+ alert_groups_queryset = (
+ AlertGroup.unarchived_objects.prefetch_related(
+ "alerts",
+ "channel__organization",
+ )
+ .filter(channel_id__in=list(alert_receive_channel_ids), resolved=False, root_alert_group__isnull=True)
+ .exclude(pk=alert_group.pk)
+ .order_by("-pk")
+ )
+
+ for alert_group_to_attach in alert_groups_queryset[:60]:
+            # long_verbose_name_without_formatting was removed from here because it
+            # increases the query count due to alerts.first().
+            # alert_group_to_attach.alerts.exists() and alerts.all()[0] don't make
+            # additional db queries thanks to prefetch_related.
+ first_alert = alert_group_to_attach.alerts.all()[0]
+ templated_alert = AlertSlackRenderer(first_alert).templated_alert
+ sf = SlackFormatter(alert_group_to_attach.channel.organization)
+ if is_string_with_visible_characters(templated_alert.title):
+ alert_name = templated_alert.title
+ alert_name = sf.format(alert_name)
+ alert_name = clean_markup(alert_name)
+ else:
+ alert_name = (
+ f"#{alert_group_to_attach.inside_organization_number} "
+ f"{DEFAULT_BACKUP_TITLE} via {alert_group_to_attach.channel.verbal_name}"
+ )
+ if len(alert_name) > 75:
+ alert_name = f"{alert_name[:72]}..."
+ collected_options.append(
+ {
+ "text": {"type": "plain_text", "text": f"{alert_name}", "emoji": True},
+ "value": str(alert_group_to_attach.pk),
+ }
+ )
+ if len(collected_options) > 0:
+ blocks.append(
+ {
+ "type": "input",
+ "block_id": self.routing_uid(),
+ "element": {
+ "type": "static_select",
+ "placeholder": {
+ "type": "plain_text",
+ "text": "Attach to...",
+ },
+ "action_id": AttachGroupStep.routing_uid(),
+ "options": collected_options[:60],
+ },
+ "label": {
+ "type": "plain_text",
+ "text": "Select incident:",
+ "emoji": True,
+ },
+ }
+ )
+ return blocks
+
+
+class AttachGroupStep(
+ CheckAlertIsUnarchivedMixin,
+ IncidentActionsAccessControlMixin,
+ scenario_step.ScenarioStep,
+):
+
+ tags = [
+ scenario_step.ScenarioStep.TAG_INCIDENT_ROUTINE,
+ ]
+
+ ALLOWED_ROLES = [Role.ADMIN, Role.EDITOR]
+ ACTION_VERBOSE = "Attach incident"
+
+ def process_signal(self, log_record):
+ alert_group = log_record.alert_group
+
+ if log_record.type == AlertGroupLogRecord.TYPE_ATTACHED and log_record.alert_group.is_maintenance_incident:
+ attachments = [
+ {"callback_id": "alert", "text": "{}".format(log_record.rendered_log_line_action(for_slack=True))},
+ ]
+ self._publish_message_to_thread(alert_group, attachments)
+
+ if log_record.type == AlertGroupLogRecord.TYPE_FAILED_ATTACHMENT:
+ ephemeral_text = log_record.rendered_log_line_action(for_slack=True)
+ slack_user_identity = log_record.author.slack_user_identity
+
+ if slack_user_identity:
+ self._slack_client.api_call(
+ "chat.postEphemeral",
+ user=slack_user_identity.slack_id,
+ channel=alert_group.slack_message.channel_id,
+ text="{}{}".format(ephemeral_text[:1].upper(), ephemeral_text[1:]),
+ unfurl_links=True,
+ )
+
+ self._update_slack_message(alert_group)
+
+ def process_scenario(self, slack_user_identity, slack_team_identity, payload, action=None):
+
+ # submit selection in modal window
+ if payload["type"] == scenario_step.PAYLOAD_TYPE_VIEW_SUBMISSION:
+ alert_group_pk = json.loads(payload["view"]["private_metadata"])["alert_group_pk"]
+ alert_group = AlertGroup.all_objects.get(pk=alert_group_pk)
+ root_alert_group_pk = payload["view"]["state"]["values"][SelectAttachGroupStep.routing_uid()][
+ AttachGroupStep.routing_uid()
+ ]["selected_option"]["value"]
+ root_alert_group = AlertGroup.all_objects.get(pk=root_alert_group_pk)
+ # old version of attach selection by dropdown
+ else:
+ try:
+ root_alert_group_pk = int(payload["actions"][0]["selected_options"][0]["value"])
+ except KeyError:
+ root_alert_group_pk = int(payload["actions"][0]["selected_option"]["value"])
+
+ root_alert_group = AlertGroup.all_objects.get(pk=root_alert_group_pk)
+ alert_group = self.get_alert_group_from_slack_message(payload)
+
+ if self.check_alert_is_unarchived(slack_team_identity, payload, alert_group) and self.check_alert_is_unarchived(
+ slack_team_identity, payload, root_alert_group
+ ):
+ alert_group.attach_by_user(self.user, root_alert_group, action_source=ActionSource.SLACK)
+ else:
+ self._update_slack_message(alert_group)
+
+
+class UnAttachGroupStep(
+ CheckAlertIsUnarchivedMixin,
+ IncidentActionsAccessControlMixin,
+ scenario_step.ScenarioStep,
+):
+
+ tags = [
+ scenario_step.ScenarioStep.TAG_INCIDENT_ROUTINE,
+ ]
+
+ ALLOWED_ROLES = [Role.ADMIN, Role.EDITOR]
+ ACTION_VERBOSE = "Unattach incident"
+
+ def process_scenario(self, slack_user_identity, slack_team_identity, payload, action=None):
+ alert_group = self.get_alert_group_from_slack_message(payload)
+ if self.check_alert_is_unarchived(slack_team_identity, payload, alert_group):
+ alert_group.un_attach_by_user(self.user, action_source=ActionSource.SLACK)
+
+ def process_signal(self, log_record):
+ alert_group = log_record.alert_group
+ self._update_slack_message(alert_group)
+
+
+class StopInvitationProcess(CheckAlertIsUnarchivedMixin, IncidentActionsAccessControlMixin, scenario_step.ScenarioStep):
+
+ tags = [
+ scenario_step.ScenarioStep.TAG_INCIDENT_ROUTINE,
+ ]
+
+ ALLOWED_ROLES = [Role.ADMIN, Role.EDITOR]
+ ACTION_VERBOSE = "stop invitation"
+
+ def process_scenario(self, slack_user_identity, slack_team_identity, payload, action=None):
+ alert_group = self.get_alert_group_from_slack_message(payload)
+ if not self.check_alert_is_unarchived(slack_team_identity, payload, alert_group):
+ return
+
+ invitation_pk = payload["actions"][0]["name"].split("_")[1]
+ Invitation.stop_invitation(invitation_pk, self.user)
+
+ def process_signal(self, log_record):
+ self._update_slack_message(log_record.invitation.alert_group)
+
+
+class CustomButtonProcessStep(
+ CheckAlertIsUnarchivedMixin,
+ IncidentActionsAccessControlMixin,
+ scenario_step.ScenarioStep,
+):
+
+ tags = [
+ scenario_step.ScenarioStep.TAG_INCIDENT_ROUTINE,
+ ]
+
+ ALLOWED_ROLES = [Role.ADMIN, Role.EDITOR]
+ ACTION_VERBOSE = "click custom button"
+
+ def process_scenario(self, slack_user_identity, slack_team_identity, payload, action=None):
+        CustomButton = apps.get_model("alerts", "CustomButton")
+        alert_group = self.get_alert_group_from_slack_message(payload)
+        if self.check_alert_is_unarchived(slack_team_identity, payload, alert_group):
+            custom_button_pk = payload["actions"][0]["name"].split("_")[1]
+            alert_group_pk = payload["actions"][0]["name"].split("_")[2]
+            try:
+                CustomButton.objects.get(pk=custom_button_pk)
+            except CustomButton.DoesNotExist:
+ warning_text = "Oops! This button was deleted"
+ self.open_warning_window(payload, warning_text=warning_text)
+ self._update_slack_message(alert_group)
+ else:
+ custom_button_result.apply_async(
+ args=(
+ custom_button_pk,
+ alert_group_pk,
+ ),
+ kwargs={"user_pk": self.user.pk},
+ )
+
+ def process_signal(self, log_record):
+ alert_group = log_record.alert_group
+ result_message = log_record.reason
+ custom_button = log_record.custom_button
+ debug_message = ""
+ if not log_record.step_specific_info["is_request_successful"]:
+ with suppress(TemplateError):
+ post_kwargs = custom_button.build_post_kwargs(log_record.alert_group.alerts.first())
+ curl_request = render_curl_command(log_record.custom_button.webhook, "POST", post_kwargs)
+ debug_message = f"```{curl_request}```"
+
+ if log_record.author is not None:
+ user_verbal = log_record.author.get_user_verbal_for_team_for_slack(mention=True)
+ text = (
+ f"{user_verbal} sent a request from an outgoing webhook `{log_record.custom_button.name}` "
+ f"with the result `{result_message}`"
+ )
+ else:
+ text = (
+ f"A request from an outgoing webhook `{log_record.custom_button.name}` was sent "
+ f"according to escalation policy with the result `{result_message}`"
+ )
+ attachments = [
+ {"callback_id": "alert", "text": debug_message, "footer": text},
+ ]
+ self._publish_message_to_thread(alert_group, attachments)
+
+
+class ResolveGroupStep(
+ CheckAlertIsUnarchivedMixin,
+ IncidentActionsAccessControlMixin,
+ scenario_step.ScenarioStep,
+):
+
+ tags = [
+ scenario_step.ScenarioStep.TAG_INCIDENT_ROUTINE,
+ ]
+
+ ALLOWED_ROLES = [Role.ADMIN, Role.EDITOR]
+ ACTION_VERBOSE = "resolve incident"
+
+ def process_scenario(self, slack_user_identity, slack_team_identity, payload, action=None):
+ ResolutionNoteModalStep = scenario_step.ScenarioStep.get_step("resolution_note", "ResolutionNoteModalStep")
+
+ alert_group = self.get_alert_group_from_slack_message(payload)
+
+ if not self.check_alert_is_unarchived(slack_team_identity, payload, alert_group):
+ return
+
+ if alert_group.maintenance_uuid is None:
+
+ if self.organization.is_resolution_note_required and not alert_group.has_resolution_notes:
+
+ resolution_note_data = {
+ "resolution_note_window_action": "edit",
+ "alert_group_pk": alert_group.pk,
+ "action_resolve": True,
+ }
+ ResolutionNoteModalStep(slack_team_identity, self.organization, self.user).process_scenario(
+ slack_user_identity, slack_team_identity, payload, data=resolution_note_data
+ )
+ return
+
+ alert_group.resolve_by_user(self.user, action_source=ActionSource.SLACK)
+ else:
+ alert_group.stop_maintenance(self.user)
+
+ def process_signal(self, log_record):
+ alert_group = log_record.alert_group
+
+ if not alert_group.happened_while_maintenance:
+ self._update_slack_message(alert_group)
+
+
+class UnResolveGroupStep(
+ CheckAlertIsUnarchivedMixin,
+ IncidentActionsAccessControlMixin,
+ scenario_step.ScenarioStep,
+):
+
+ tags = [
+ scenario_step.ScenarioStep.TAG_INCIDENT_ROUTINE,
+ ]
+
+ ALLOWED_ROLES = [Role.ADMIN, Role.EDITOR]
+ ACTION_VERBOSE = "unresolve incident"
+
+ def process_scenario(self, slack_user_identity, slack_team_identity, payload, action=None):
+ alert_group = self.get_alert_group_from_slack_message(payload)
+ if self.check_alert_is_unarchived(slack_team_identity, payload, alert_group):
+ alert_group.un_resolve_by_user(self.user, action_source=ActionSource.SLACK)
+
+ def process_signal(self, log_record):
+ alert_group = log_record.alert_group
+ self._update_slack_message(alert_group)
+
+
+class AcknowledgeGroupStep(
+ CheckAlertIsUnarchivedMixin,
+ IncidentActionsAccessControlMixin,
+ scenario_step.ScenarioStep,
+):
+
+ tags = [
+ scenario_step.ScenarioStep.TAG_INCIDENT_ROUTINE,
+ ]
+
+ ALLOWED_ROLES = [Role.ADMIN, Role.EDITOR]
+ ACTION_VERBOSE = "acknowledge incident"
+
+ def process_scenario(self, slack_user_identity, slack_team_identity, payload, action=None):
+ alert_group = self.get_alert_group_from_slack_message(payload)
+ logger.debug(f"process_scenario in AcknowledgeGroupStep for alert_group {alert_group.pk}")
+ if self.check_alert_is_unarchived(slack_team_identity, payload, alert_group):
+ alert_group.acknowledge_by_user(self.user, action_source=ActionSource.SLACK)
+
+ def process_signal(self, log_record):
+ alert_group = log_record.alert_group
+ logger.debug(f"Started process_signal in AcknowledgeGroupStep for alert_group {alert_group.pk}")
+ self._update_slack_message(alert_group)
+ logger.debug(f"Finished process_signal in AcknowledgeGroupStep for alert_group {alert_group.pk}")
+
+
+class UnAcknowledgeGroupStep(
+ CheckAlertIsUnarchivedMixin,
+ IncidentActionsAccessControlMixin,
+ scenario_step.ScenarioStep,
+):
+
+ tags = [
+ scenario_step.ScenarioStep.TAG_INCIDENT_ROUTINE,
+ ]
+
+ ALLOWED_ROLES = [Role.ADMIN, Role.EDITOR]
+ ACTION_VERBOSE = "unacknowledge incident"
+
+ def process_scenario(self, slack_user_identity, slack_team_identity, payload, action=None):
+ alert_group = self.get_alert_group_from_slack_message(payload)
+ logger.debug(f"process_scenario in UnAcknowledgeGroupStep for alert_group {alert_group.pk}")
+ if self.check_alert_is_unarchived(slack_team_identity, payload, alert_group):
+ alert_group.un_acknowledge_by_user(self.user, action_source=ActionSource.SLACK)
+
+ def process_signal(self, log_record):
+ AlertGroupLogRecord = apps.get_model("alerts", "AlertGroupLogRecord")
+ alert_group = log_record.alert_group
+ logger.debug(f"Started process_signal in UnAcknowledgeGroupStep for alert_group {alert_group.pk}")
+
+ if log_record.type == AlertGroupLogRecord.TYPE_AUTO_UN_ACK:
+ channel_id = alert_group.slack_message.channel_id
+ if log_record.author is not None:
+ user_verbal = log_record.author.get_user_verbal_for_team_for_slack(mention=True)
+ else:
+ user_verbal = "No one"
+
+ message_attachments = [
+ {
+ "callback_id": "alert",
+                    "text": f"{user_verbal} hasn't responded to an acknowledge timeout reminder."
+                    f" The incident has been unacknowledged automatically",
+ "footer": "Escalation started again...",
+ },
+ ]
+ if alert_group.slack_message.ack_reminder_message_ts:
+ try:
+ self._slack_client.api_call(
+ "chat.update",
+ channel=channel_id,
+ ts=alert_group.slack_message.ack_reminder_message_ts,
+ attachments=message_attachments,
+ )
+ except SlackAPIException as e:
+ # post to thread if ack reminder message was deleted in Slack
+ if e.response["error"] == "message_not_found":
+ self._publish_message_to_thread(alert_group, message_attachments)
+ elif e.response["error"] == "account_inactive":
+ logger.info(
+ f"Skip unacknowledge slack message for alert_group {alert_group.pk} due to account_inactive"
+ )
+ else:
+ raise
+ else:
+ self._publish_message_to_thread(alert_group, message_attachments)
+ self._update_slack_message(alert_group)
+ logger.debug(f"Finished process_signal in UnAcknowledgeGroupStep for alert_group {alert_group.pk}")
+
+
+class AcknowledgeConfirmationStep(AcknowledgeGroupStep):
+ ACTION_VERBOSE = "confirm acknowledge status"
+
+ def process_scenario(self, slack_user_identity, slack_team_identity, payload, action=None):
+ AlertGroup = apps.get_model("alerts", "AlertGroup")
+ alert_group_id = payload["actions"][0]["value"].split("_")[1]
+ alert_group = AlertGroup.all_objects.get(pk=alert_group_id)
+ channel = payload["channel"]["id"]
+ message_ts = payload["message_ts"]
+
+ if alert_group.acknowledged:
+ if alert_group.acknowledged_by == AlertGroup.USER:
+ if self.user == alert_group.acknowledged_by_user:
+ user_verbal = alert_group.acknowledged_by_user.get_user_verbal_for_team_for_slack()
+ attachments = [
+ {
+ "color": "#c6c000",
+ "callback_id": "alert",
+ "text": f"{user_verbal} is confirmed to be working on this incident",
+ },
+ ]
+ self._slack_client.api_call(
+ "chat.update",
+ channel=channel,
+ ts=message_ts,
+ attachments=attachments,
+ )
+                    alert_group.acknowledged_by_confirmed = timezone.now()
+ alert_group.save(update_fields=["acknowledged_by_confirmed"])
+ else:
+ self._slack_client.api_call(
+ "chat.postEphemeral",
+ channel=channel,
+ user=slack_user_identity.slack_id,
+ text="This alert is acknowledged by another user. Acknowledge it yourself first.",
+ )
+ elif alert_group.acknowledged_by == AlertGroup.SOURCE:
+ user_verbal = self.user.get_user_verbal_for_team_for_slack()
+ attachments = [
+ {
+ "color": "#c6c000",
+ "callback_id": "alert",
+ "text": f"{user_verbal} is confirmed to be working on this incident",
+ },
+ ]
+ self._slack_client.api_call(
+ "chat.update",
+ channel=channel,
+ ts=message_ts,
+ attachments=attachments,
+ )
+                alert_group.acknowledged_by_confirmed = timezone.now()
+ alert_group.save(update_fields=["acknowledged_by_confirmed"])
+ else:
+ self._slack_client.api_call(
+ "chat.delete",
+ channel=channel,
+ ts=message_ts,
+ )
+ self._slack_client.api_call(
+ "chat.postEphemeral",
+ channel=channel,
+ user=slack_user_identity.slack_id,
+ text="This alert is already unacknowledged.",
+ )
+
+ def process_signal(self, log_record):
+ Organization = apps.get_model("user_management", "Organization")
+ SlackMessage = apps.get_model("slack", "SlackMessage")
+
+ alert_group = log_record.alert_group
+ channel_id = alert_group.slack_message.channel_id
+ user_verbal = log_record.author.get_user_verbal_for_team_for_slack(mention=True)
+
+ if alert_group.channel.organization.unacknowledge_timeout != Organization.UNACKNOWLEDGE_TIMEOUT_NEVER:
+ attachments = [
+ {
+ "fallback": "Are you still working on this incident?",
+ "text": f"{user_verbal}, please confirm that you're still working on this incident.",
+ "callback_id": "alert",
+ "attachment_type": "default",
+ "footer": "This is a reminder that the incident is still acknowledged"
+ " and not resolved. It will be unacknowledged automatically and escalation will"
+ " start again soon.",
+ "actions": [
+ {
+ "name": scenario_step.ScenarioStep.get_step(
+ "distribute_alerts", "AcknowledgeConfirmationStep"
+ ).routing_uid(),
+ "text": "Confirm",
+ "type": "button",
+ "style": "primary",
+ "value": scenario_step.ScenarioStep.get_step(
+ "distribute_alerts", "AcknowledgeConfirmationStep"
+ ).routing_uid()
+ + ("_" + str(alert_group.pk)),
+ },
+ ],
+ }
+ ]
+ try:
+ response = self._slack_client.api_call(
+ "chat.postMessage",
+ channel=channel_id,
+ attachments=attachments,
+ thread_ts=alert_group.slack_message.slack_id,
+ )
+ except SlackAPITokenException as e:
+ logger.warning(
+ f"Unable to post acknowledge reminder in slack. "
+ f"Slack team identity pk: {self.slack_team_identity.pk}.\n"
+ f"{e}"
+ )
+ except SlackAPIChannelArchivedException:
+ logger.warning(
+ f"Unable to post acknowledge reminder in slack. "
+ f"Slack team identity pk: {self.slack_team_identity.pk}.\n"
+ f"Reason: 'is_archived'"
+ )
+ except SlackAPIException as e:
+ if e.response["error"] == "channel_not_found":
+ logger.warning(
+ f"Unable to post acknowledge reminder in slack. "
+ f"Slack team identity pk: {self.slack_team_identity.pk}.\n"
+ f"Reason: 'channel_not_found'"
+ )
+ else:
+ raise e
+ else:
+ SlackMessage(
+ slack_id=response["ts"],
+ organization=alert_group.channel.organization,
+ _slack_team_identity=self.slack_team_identity,
+ channel_id=channel_id,
+ alert_group=alert_group,
+ ).save()
+
+ alert_group.slack_message.ack_reminder_message_ts = response["ts"]
+ alert_group.slack_message.save(update_fields=["ack_reminder_message_ts"])
+ else:
+ attachments = [
+ {
+ "callback_id": "alert",
+ "text": f"This is a reminder that the incident is still acknowledged by {user_verbal}"
+ f" and not resolved.",
+ },
+ ]
+ self._publish_message_to_thread(alert_group, attachments)
+
+
+class WipeGroupStep(scenario_step.ScenarioStep):
+
+ tags = [
+ scenario_step.ScenarioStep.TAG_INCIDENT_ROUTINE,
+ ]
+
+ ACTION_VERBOSE = "wipe incident"
+
+ def process_signal(self, log_record):
+ alert_group = log_record.alert_group
+ user_verbal = log_record.author.get_user_verbal_for_team_for_slack()
+ attachments = [
+ {
+ "color": "warning",
+ "callback_id": "alert",
+ "footer": "Incident wiped",
+ "text": "Wiped by {}.".format(user_verbal),
+ },
+ ]
+ self._publish_message_to_thread(alert_group, attachments)
+ self._update_slack_message(alert_group)
+
+
+class DeleteGroupStep(scenario_step.ScenarioStep):
+
+ tags = [
+ scenario_step.ScenarioStep.TAG_INCIDENT_ROUTINE,
+ ]
+
+ ACTION_VERBOSE = "delete incident"
+
+ def process_signal(self, log_record):
+ alert_group = log_record.alert_group
+
+ self.remove_resolution_note_reaction(alert_group)
+
+ bot_messages_ts = []
+ bot_messages_ts.extend(alert_group.slack_messages.values_list("slack_id", flat=True))
+ bot_messages_ts.extend(
+ alert_group.resolution_note_slack_messages.filter(posted_by_bot=True).values_list("ts", flat=True)
+ )
+ channel_id = alert_group.slack_message.channel_id
+
+ for message_ts in bot_messages_ts:
+ try:
+ self._slack_client.api_call(
+ "chat.delete",
+ channel=channel_id,
+ ts=message_ts,
+ )
+ except SlackAPITokenException as e:
+ logger.error(
+                    f"Unable to delete messages in slack. Message ts: {message_ts}.\n"
+ f"Slack team identity pk: {self.slack_team_identity.pk}.\n"
+ f"{e}"
+ )
+ except SlackAPIException as e:
+ if e.response["error"] == "channel_not_found":
+ logger.warning(
+                        f"Unable to delete messages in slack. Message ts: {message_ts}.\n"
+                        f"Slack team identity pk: {self.slack_team_identity.pk}.\n"
+                        f"Reason: 'channel_not_found'\n"
+                        f"{e}"
+ )
+ elif e.response["error"] == "message_not_found":
+ logger.warning(
+                        f"Unable to delete messages in slack. Message ts: {message_ts}.\n"
+                        f"Slack team identity pk: {self.slack_team_identity.pk}.\n"
+                        f"Reason: 'message_not_found'\n"
+                        f"{e}"
+ )
+ elif e.response["error"] == "is_archived":
+ logger.warning(
+                        f"Unable to delete messages in slack. Message ts: {message_ts}.\n"
+                        f"Slack team identity pk: {self.slack_team_identity.pk}.\n"
+                        f"Reason: 'is_archived'\n"
+                        f"{e}"
+ )
+ elif e.response["error"] == "cant_delete_message":
+ sc_with_access_token = SlackClientWithErrorHandling(
+ self.slack_team_identity.access_token
+ ) # used access_token instead of bot_access_token
+ sc_with_access_token.api_call(
+ "chat.delete",
+ channel=channel_id,
+ ts=message_ts,
+ )
+ else:
+ raise e
+
+ def remove_resolution_note_reaction(self, alert_group):
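+        # Un-mark messages previously added to the resolution note and remove the
+        # :memo: reaction left on them.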
+ for message in alert_group.resolution_note_slack_messages.filter(added_to_resolution_note=True):
+ message.added_to_resolution_note = False
+ message.save(update_fields=["added_to_resolution_note"])
+ try:
+ self._slack_client.api_call(
+ "reactions.remove",
+ channel=message.slack_channel_id,
+ name="memo",
+ timestamp=message.ts,
+ )
+ except SlackAPITokenException as e:
+ logger.warning(
+ f"Unable to delete resolution note reaction in slack. "
+ f"Slack team identity pk: {self.slack_team_identity.pk}.\n"
+ f"{e}"
+ )
+ except SlackAPIException as e:
+            logger.warning(f"Unable to delete resolution note reaction in slack.\n{e}")
+
+
+class UpdateLogReportMessageStep(scenario_step.ScenarioStep):
+ def process_signal(self, alert_group):
+ if alert_group.skip_escalation_in_slack or alert_group.channel.is_rate_limited_in_slack:
+ return
+
+ self.update_log_message(alert_group)
+
+ def post_log_message(self, alert_group):
+ SlackMessage = apps.get_model("slack", "SlackMessage")
+
+ slack_message = alert_group.get_slack_message()
+
+ if slack_message is None:
+ logger.info(f"Cannot post log message for alert_group {alert_group.pk} because SlackMessage doesn't exist")
+ return None
+
+ attachments = [
+ {
+ "text": "Building escalation plan... :thinking_face:",
+ }
+ ]
+ slack_log_message = alert_group.slack_log_message
+
+ if slack_log_message is None:
+ logger.debug(f"Start posting new log message for alert_group {alert_group.pk}")
+ try:
+ result = self._slack_client.api_call(
+ "chat.postMessage",
+ channel=slack_message.channel_id,
+ thread_ts=slack_message.slack_id,
+ attachments=attachments,
+ )
+ except SlackAPITokenException as e:
+ print(e)
+ except SlackAPIRateLimitException as e:
+ if not alert_group.channel.is_rate_limited_in_slack:
+ delay = e.response.get("rate_limit_delay") or SLACK_RATE_LIMIT_DELAY
+ alert_group.channel.start_send_rate_limit_message_task(delay)
+ logger.info(
+ f"Log message has not been posted for alert_group {alert_group.pk} due to slack rate limit."
+ )
+ except SlackAPIException as e:
+ if e.response["error"] == "channel_not_found":
+ pass
+ elif e.response["error"] == "invalid_auth":
+ pass
+ elif e.response["error"] == "is_archived":
+ pass
+ else:
+ raise e
+ else:
+ logger.debug(f"Create new slack_log_message for alert_group {alert_group.pk}")
+ slack_log_message = SlackMessage(
+ slack_id=result["ts"],
+ organization=self.organization,
+ _slack_team_identity=self.slack_team_identity,
+ channel_id=slack_message.channel_id,
+ last_updated=timezone.now(),
+ alert_group=alert_group,
+ )
+ slack_log_message.save()
+
+ alert_group.slack_log_message = slack_log_message
+ alert_group.save(update_fields=["slack_log_message"])
+ logger.debug(
+ f"Finished post new log message for alert_group {alert_group.pk}, "
+ f"slack_log_message with pk '{slack_log_message.pk}' was created."
+ )
+ else:
+ self.update_log_message(alert_group)
+
+ def update_log_message(self, alert_group):
+ slack_message = alert_group.get_slack_message()
+
+ if slack_message is None:
+ logger.info(
+ f"Cannot update log message for alert_group {alert_group.pk} because SlackMessage doesn't exist"
+ )
+ return None
+
+ slack_log_message = alert_group.slack_log_message
+
+ if slack_log_message is not None:
+ # prevent too frequent updates
+ if timezone.now() <= slack_log_message.last_updated + timezone.timedelta(seconds=5):
+ return
+
+ attachments = AlertGroupLogSlackRenderer.render_incident_log_report_for_slack(alert_group)
+ logger.debug(
+ f"Update log message for alert_group {alert_group.pk}, slack_log_message {slack_log_message.pk}"
+ )
+ try:
+ self._slack_client.api_call(
+ "chat.update",
+ channel=slack_message.channel_id,
+ ts=slack_log_message.slack_id,
+ attachments=attachments,
+ )
+ except SlackAPITokenException as e:
+ print(e)
+ except SlackAPIRateLimitException as e:
+ if not alert_group.channel.is_rate_limited_in_slack:
+ delay = e.response.get("rate_limit_delay") or SLACK_RATE_LIMIT_DELAY
+ alert_group.channel.start_send_rate_limit_message_task(delay)
+ logger.info(
+ f"Log message has not been updated for alert_group {alert_group.pk} due to slack rate limit."
+ )
+ except SlackAPIException as e:
+ if e.response["error"] == "message_not_found":
+ alert_group.slack_log_message = None
+ alert_group.save(update_fields=["slack_log_message"])
+ elif e.response["error"] == "channel_not_found":
+ pass
+ elif e.response["error"] == "is_archived":
+ pass
+ elif e.response["error"] == "is_inactive":
+ pass
+ elif e.response["error"] == "account_inactive":
+ pass
+ elif e.response["error"] == "invalid_auth":
+ pass
+ else:
+ raise e
+ else:
+ slack_log_message.last_updated = timezone.now()
+ slack_log_message.save(update_fields=["last_updated"])
+ logger.debug(
+ f"Finished update log message for alert_group {alert_group.pk}, "
+ f"slack_log_message {slack_log_message.pk}"
+ )
+ # check how much time has passed since slack message was created
+ # to prevent eternal loop of restarting update log message task
+ elif timezone.now() <= slack_message.created_at + timezone.timedelta(minutes=5):
+ logger.debug(
+ f"Update log message failed for alert_group {alert_group.pk}: "
+ f"log message does not exist yet. Restarting post_or_update_log_report_message_task..."
+ )
+ post_or_update_log_report_message_task.apply_async(
+ (alert_group.pk, self.slack_team_identity.pk, True),
+ countdown=3,
+ )
+ else:
+ logger.debug(f"Update log message failed for alert_group {alert_group.pk}: " f"log message does not exist.")
+
+
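+# Payload-to-step routing for the alert distribution steps defined above.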
+STEPS_ROUTING = [
+ {
+ "payload_type": scenario_step.PAYLOAD_TYPE_INTERACTIVE_MESSAGE,
+ "action_type": scenario_step.ACTION_TYPE_BUTTON,
+ "action_name": ResolveGroupStep.routing_uid(),
+ "step": ResolveGroupStep,
+ },
+ {
+ "payload_type": scenario_step.PAYLOAD_TYPE_BLOCK_ACTIONS,
+ "block_action_type": scenario_step.BLOCK_ACTION_TYPE_BUTTON,
+ "block_action_id": ResolveGroupStep.routing_uid(),
+ "step": ResolveGroupStep,
+ },
+ {
+ "payload_type": scenario_step.PAYLOAD_TYPE_BLOCK_ACTIONS,
+ "block_action_type": scenario_step.BLOCK_ACTION_TYPE_BUTTON,
+ "block_action_id": UnResolveGroupStep.routing_uid(),
+ "step": UnResolveGroupStep,
+ },
+ {
+ "payload_type": scenario_step.PAYLOAD_TYPE_INTERACTIVE_MESSAGE,
+ "action_type": scenario_step.ACTION_TYPE_BUTTON,
+ "action_name": AcknowledgeGroupStep.routing_uid(),
+ "step": AcknowledgeGroupStep,
+ },
+ {
+ "payload_type": scenario_step.PAYLOAD_TYPE_BLOCK_ACTIONS,
+ "block_action_type": scenario_step.BLOCK_ACTION_TYPE_BUTTON,
+ "block_action_id": AcknowledgeGroupStep.routing_uid(),
+ "step": AcknowledgeGroupStep,
+ },
+ {
+ "payload_type": scenario_step.PAYLOAD_TYPE_INTERACTIVE_MESSAGE,
+ "action_type": scenario_step.ACTION_TYPE_BUTTON,
+ "action_name": AcknowledgeConfirmationStep.routing_uid(),
+ "step": AcknowledgeConfirmationStep,
+ },
+ {
+ "payload_type": scenario_step.PAYLOAD_TYPE_INTERACTIVE_MESSAGE,
+ "action_type": scenario_step.ACTION_TYPE_BUTTON,
+ "action_name": UnAcknowledgeGroupStep.routing_uid(),
+ "step": UnAcknowledgeGroupStep,
+ },
+ {
+ "payload_type": scenario_step.PAYLOAD_TYPE_BLOCK_ACTIONS,
+ "block_action_type": scenario_step.BLOCK_ACTION_TYPE_BUTTON,
+ "block_action_id": UnAcknowledgeGroupStep.routing_uid(),
+ "step": UnAcknowledgeGroupStep,
+ },
+ {
+ "payload_type": scenario_step.PAYLOAD_TYPE_INTERACTIVE_MESSAGE,
+ "action_type": scenario_step.ACTION_TYPE_SELECT,
+ "action_name": SilenceGroupStep.routing_uid(),
+ "step": SilenceGroupStep,
+ },
+ {
+ "payload_type": scenario_step.PAYLOAD_TYPE_BLOCK_ACTIONS,
+ "block_action_type": scenario_step.BLOCK_ACTION_TYPE_STATIC_SELECT,
+ "block_action_id": SilenceGroupStep.routing_uid(),
+ "step": SilenceGroupStep,
+ },
+ {
+ "payload_type": scenario_step.PAYLOAD_TYPE_INTERACTIVE_MESSAGE,
+ "action_type": scenario_step.ACTION_TYPE_BUTTON,
+ "action_name": UnSilenceGroupStep.routing_uid(),
+ "step": UnSilenceGroupStep,
+ },
+ {
+ "payload_type": scenario_step.PAYLOAD_TYPE_BLOCK_ACTIONS,
+ "block_action_type": scenario_step.BLOCK_ACTION_TYPE_BUTTON,
+ "block_action_id": UnSilenceGroupStep.routing_uid(),
+ "step": UnSilenceGroupStep,
+ },
+ {
+ "payload_type": scenario_step.PAYLOAD_TYPE_BLOCK_ACTIONS,
+ "block_action_type": scenario_step.BLOCK_ACTION_TYPE_BUTTON,
+ "block_action_id": SelectAttachGroupStep.routing_uid(),
+ "step": SelectAttachGroupStep,
+ },
+ {
+ "payload_type": scenario_step.PAYLOAD_TYPE_INTERACTIVE_MESSAGE,
+ "action_type": scenario_step.ACTION_TYPE_SELECT,
+ "action_name": AttachGroupStep.routing_uid(),
+ "step": AttachGroupStep,
+ },
+ {
+ "payload_type": scenario_step.PAYLOAD_TYPE_VIEW_SUBMISSION,
+ "view_callback_id": AttachGroupStep.routing_uid(),
+ "step": AttachGroupStep,
+ },
+ {
+ "payload_type": scenario_step.PAYLOAD_TYPE_BLOCK_ACTIONS,
+ "block_action_type": scenario_step.BLOCK_ACTION_TYPE_STATIC_SELECT,
+ "block_action_id": AttachGroupStep.routing_uid(),
+ "step": AttachGroupStep,
+ },
+ {
+ "payload_type": scenario_step.PAYLOAD_TYPE_INTERACTIVE_MESSAGE,
+ "action_type": scenario_step.ACTION_TYPE_BUTTON,
+ "action_name": UnAttachGroupStep.routing_uid(),
+ "step": UnAttachGroupStep,
+ },
+ {
+ "payload_type": scenario_step.PAYLOAD_TYPE_INTERACTIVE_MESSAGE,
+ "action_type": scenario_step.ACTION_TYPE_SELECT,
+ "action_name": InviteOtherPersonToIncident.routing_uid(),
+ "step": InviteOtherPersonToIncident,
+ },
+ {
+ "payload_type": scenario_step.PAYLOAD_TYPE_BLOCK_ACTIONS,
+ "block_action_type": scenario_step.BLOCK_ACTION_TYPE_USERS_SELECT,
+ "block_action_id": InviteOtherPersonToIncident.routing_uid(),
+ "step": InviteOtherPersonToIncident,
+ },
+ {
+ "payload_type": scenario_step.PAYLOAD_TYPE_BLOCK_ACTIONS,
+ "block_action_type": scenario_step.BLOCK_ACTION_TYPE_STATIC_SELECT,
+ "block_action_id": InviteOtherPersonToIncident.routing_uid(),
+ "step": InviteOtherPersonToIncident,
+ },
+ {
+ "payload_type": scenario_step.PAYLOAD_TYPE_INTERACTIVE_MESSAGE,
+ "action_type": scenario_step.ACTION_TYPE_BUTTON,
+ "action_name": StopInvitationProcess.routing_uid(),
+ "step": StopInvitationProcess,
+ },
+ {
+ "payload_type": scenario_step.PAYLOAD_TYPE_INTERACTIVE_MESSAGE,
+ "action_type": scenario_step.ACTION_TYPE_BUTTON,
+ "action_name": CustomButtonProcessStep.routing_uid(),
+ "step": CustomButtonProcessStep,
+ },
+]
diff --git a/engine/apps/slack/scenarios/escalation_delivery.py b/engine/apps/slack/scenarios/escalation_delivery.py
new file mode 100644
index 0000000000..e999afd320
--- /dev/null
+++ b/engine/apps/slack/scenarios/escalation_delivery.py
@@ -0,0 +1,47 @@
+import humanize
+from django.apps import apps
+
+from apps.slack.scenarios import scenario_step
+
+
+class EscalationDeliveryStep(scenario_step.ScenarioStep):
+ """
+    Used for user group and channel notifications in Slack.
+ """
+
+ def get_user_notification_message_for_thread_for_usergroup(self, user, notification_policy):
+ UserNotificationPolicy = apps.get_model("base", "UserNotificationPolicy")
+ notification_channel = notification_policy.notify_by
+ notification_step = notification_policy.step
+ user_verbal = user.get_user_verbal_for_team_for_slack()
+ user_verbal_with_mention = user.get_user_verbal_for_team_for_slack(mention=True)
+
+ if (
+ notification_channel == UserNotificationPolicy.NotificationChannel.SLACK
+ and notification_step == UserNotificationPolicy.Step.NOTIFY
+ ):
+ # Mention if asked to notify by slack
+ user_mention_as = user_verbal_with_mention
+ notify_by = ""
+ elif notification_step == UserNotificationPolicy.Step.WAIT:
+ user_mention_as = user_verbal
+ if notification_policy.wait_delay is not None:
+ notify_by = " in {}".format(format(humanize.naturaldelta(notification_policy.wait_delay)))
+ else:
+ notify_by = ""
+ else:
+ # Don't mention if asked to notify somehow else but drop a note for colleagues
+ user_mention_as = user_verbal
+ notify_by = " by {}".format(UserNotificationPolicy.NotificationChannel(notification_channel).label)
+ return "Inviting {}{} to look at incident.".format(user_mention_as, notify_by)
+
+ def notify_thread_about_action(self, alert_group, text, footer=None, color=None):
+ attachments = [
+ {
+ "callback_id": "alert",
+ "footer": footer,
+ "text": text,
+ "color": color,
+ },
+ ]
+ self._publish_message_to_thread(alert_group, attachments)
diff --git a/engine/apps/slack/scenarios/notification_delivery.py b/engine/apps/slack/scenarios/notification_delivery.py
new file mode 100644
index 0000000000..3d04a3524f
--- /dev/null
+++ b/engine/apps/slack/scenarios/notification_delivery.py
@@ -0,0 +1,96 @@
+import logging
+
+from django.apps import apps
+
+from apps.slack.scenarios import scenario_step
+from apps.slack.slack_client.exceptions import SlackAPIException, SlackAPITokenException
+
+logger = logging.getLogger(__name__)
+
+
+class NotificationDeliveryStep(scenario_step.ScenarioStep):
+ def process_signal(self, log_record):
+ UserNotificationPolicy = apps.get_model("base", "UserNotificationPolicy")
+ UserNotificationPolicyLogRecord = apps.get_model("base", "UserNotificationPolicyLogRecord")
+
+ user = log_record.author
+ alert_group = log_record.alert_group
+
+ user_verbal_with_mention = user.get_user_verbal_for_team_for_slack(mention=True)
+
+        # TODO: move message generation to UserNotificationPolicyLogRecord
+ if log_record.type == UserNotificationPolicyLogRecord.TYPE_PERSONAL_NOTIFICATION_FAILED:
+ if log_record.notification_error_code in UserNotificationPolicyLogRecord.ERRORS_TO_SEND_IN_SLACK_CHANNEL:
+ if (
+ log_record.notification_error_code
+ == UserNotificationPolicyLogRecord.ERROR_NOTIFICATION_SMS_LIMIT_EXCEEDED
+ ):
+ self.post_message_to_channel(
+ f"Attempt to send an SMS to {user_verbal_with_mention} has been failed due to a plan limit",
+ alert_group.slack_message.channel_id,
+ color="red",
+ )
+ elif (
+ log_record.notification_error_code
+ == UserNotificationPolicyLogRecord.ERROR_NOTIFICATION_PHONE_CALLS_LIMIT_EXCEEDED
+ ):
+ self.post_message_to_channel(
+ f"Attempt to call to {user_verbal_with_mention} has been failed due to a plan limit",
+ alert_group.slack_message.channel_id,
+ color="red",
+ )
+ elif (
+ log_record.notification_error_code
+ == UserNotificationPolicyLogRecord.ERROR_NOTIFICATION_MAIL_LIMIT_EXCEEDED
+ ):
+ self.post_message_to_channel(
+ f"Failed to send email to {user_verbal_with_mention}. Exceeded limit for mails",
+ alert_group.slack_message.channel_id,
+ color="red",
+ )
+ elif (
+ log_record.notification_error_code
+ == UserNotificationPolicyLogRecord.ERROR_NOTIFICATION_PHONE_NUMBER_IS_NOT_VERIFIED
+ ):
+ if log_record.notification_channel == UserNotificationPolicy.NotificationChannel.SMS:
+ self.post_message_to_channel(
+ f"Failed to send an SMS to {user_verbal_with_mention}. Phone number is not verified",
+ alert_group.slack_message.channel_id,
+ color="red",
+ )
+ elif log_record.notification_channel == UserNotificationPolicy.NotificationChannel.PHONE_CALL:
+ self.post_message_to_channel(
+ f"Failed to call to {user_verbal_with_mention}. Phone number is not verified",
+ alert_group.slack_message.channel_id,
+ color="red",
+ )
+
+ def post_message_to_channel(self, text, channel, color=None, footer=None):
+ color_id = self.get_color_id(color)
+ attachments = [
+ {"color": color_id, "callback_id": "alert", "footer": footer, "text": text},
+ ]
+ try:
+ # TODO: slack-onprem, check exceptions
+ self._slack_client.api_call(
+ "chat.postMessage",
+ channel=channel,
+ attachments=attachments,
+ unfurl_links=True,
+ )
+ except SlackAPITokenException as e:
+            logger.warning(e)
+ except SlackAPIException as e:
+ if e.response["error"] == "channel_not_found":
+ pass
+ elif e.response["error"] == "is_archived":
+ pass
+ elif e.response["error"] == "invalid_auth":
+                logger.warning(e)
+ else:
+ raise e
+
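+    # Maps the symbolic color names used above to hex values; any other value
+    # (e.g. an explicit hex string, or None) is passed through to Slack unchanged.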
+ def get_color_id(self, color):
+ if color == "red":
+ color_id = "#FF0000"
+ elif color == "yellow":
+ color_id = "#c6c000"
+ else:
+ color_id = color
+ return color_id
diff --git a/engine/apps/slack/scenarios/onboarding.py b/engine/apps/slack/scenarios/onboarding.py
new file mode 100644
index 0000000000..0caf0a67c4
--- /dev/null
+++ b/engine/apps/slack/scenarios/onboarding.py
@@ -0,0 +1,43 @@
+import logging
+
+from apps.slack.scenarios import scenario_step
+
+logger = logging.getLogger(__name__)
+
+
+class ImOpenStep(scenario_step.ScenarioStep):
+    """
+    Empty step to handle the event and avoid 500s, in case we need it in the future.
+    """
+
+    tags = [
+        scenario_step.ScenarioStep.TAG_TRIGGERED_BY_SYSTEM,
+    ]
+
+    def process_scenario(self, slack_user_identity, slack_team_identity, payload, action=None):
+        logger.info("ImOpenStep, doing nothing.")
+
+
+class AppHomeOpenedStep(scenario_step.ScenarioStep):
+
+ tags = [
+ scenario_step.ScenarioStep.TAG_TRIGGERED_BY_SYSTEM,
+ ]
+
+ def process_scenario(self, slack_user_identity, slack_team_identity, payload, action=None):
+ pass
+
+
+STEPS_ROUTING = [
+ {
+ "payload_type": scenario_step.PAYLOAD_TYPE_EVENT_CALLBACK,
+ "event_type": scenario_step.EVENT_TYPE_IM_OPEN,
+ "step": ImOpenStep,
+ },
+ {
+ "payload_type": scenario_step.PAYLOAD_TYPE_EVENT_CALLBACK,
+ "event_type": scenario_step.EVENT_TYPE_APP_HOME_OPENED,
+ "step": AppHomeOpenedStep,
+ },
+]
diff --git a/engine/apps/slack/scenarios/profile_update.py b/engine/apps/slack/scenarios/profile_update.py
new file mode 100644
index 0000000000..3fe2b0e621
--- /dev/null
+++ b/engine/apps/slack/scenarios/profile_update.py
@@ -0,0 +1,55 @@
+from apps.slack.constants import SLACK_BOT_ID
+from apps.slack.scenarios import scenario_step
+
+
+class ProfileUpdateStep(scenario_step.ScenarioStep):
+ tags = [
+ scenario_step.ScenarioStep.TAG_TRIGGERED_BY_SYSTEM,
+ ]
+ # Avoid logging this step to prevent collecting sensitive data of our customers
+ need_to_be_logged = False
+
+ def process_scenario(self, slack_user_identity, slack_team_identity, payload, action=None):
+ """
+ Triggered by action: Any update in Slack Profile.
+ Dangerous because it's often triggered by internal client's company systems.
+ May cause flood, should be ready to useless updates.
+ """
+
+ member = payload["event"]["user"]
+ slack_user_identity.profile_real_name = member.get("profile").get("real_name", None)
+ slack_user_identity.profile_real_name_normalized = member.get("profile").get("real_name_normalized", None)
+ slack_user_identity.profile_display_name = member.get("profile").get("display_name", None)
+ slack_user_identity.profile_display_name_normalized = member.get("profile").get("display_name_normalized", None)
+ slack_user_identity.cached_avatar = member.get("profile").get("image_512", None)
+ slack_user_identity.cached_slack_email = member.get("profile").get("email", "")
+ slack_user_identity.cached_timezone = member.get("tz", None)
+
+ updated_phone_number = payload["event"]["user"]["profile"].get("phone", None)
+        # if the phone number has changed (or is missing), drop the cached number
+ if updated_phone_number is None or updated_phone_number != slack_user_identity.cached_phone_number:
+ slack_user_identity.cached_phone_number = None
+ slack_user_identity.cached_country_code = None
+
+ slack_user_identity.deleted = member.get("deleted", None)
+ slack_user_identity.is_admin = member.get("is_admin", None)
+ slack_user_identity.is_owner = member.get("is_owner", None)
+ slack_user_identity.is_primary_owner = member.get("is_primary_owner", None)
+ slack_user_identity.is_restricted = member.get("is_restricted", None)
+ slack_user_identity.is_ultra_restricted = member.get("is_ultra_restricted", None)
+ if slack_user_identity.slack_id == SLACK_BOT_ID:
+ slack_user_identity.cached_is_bot = True
+ else:
+ slack_user_identity.cached_is_bot = member.get("is_bot", None)
+ slack_user_identity.is_app_user = member.get("is_app_user", None)
+
+ slack_user_identity.save()
+
+
+STEPS_ROUTING = [
+ {
+ "payload_type": scenario_step.PAYLOAD_TYPE_EVENT_CALLBACK,
+ "event_type": scenario_step.EVENT_TYPE_USER_CHANGE,
+ "step": ProfileUpdateStep,
+ },
+]
diff --git a/engine/apps/slack/scenarios/public_menu.py b/engine/apps/slack/scenarios/public_menu.py
new file mode 100644
index 0000000000..c11a7f7509
--- /dev/null
+++ b/engine/apps/slack/scenarios/public_menu.py
@@ -0,0 +1,536 @@
+import json
+import logging
+
+from django.apps import apps
+from django.conf import settings
+from django.http import JsonResponse
+from django.utils import timezone
+
+from apps.slack.scenarios import scenario_step
+from apps.slack.slack_client import SlackClientWithErrorHandling
+from apps.slack.slack_client.exceptions import SlackAPIException
+
+from .step_mixins import CheckAlertIsUnarchivedMixin
+
+logger = logging.getLogger(__name__)
+logger.setLevel(logging.DEBUG)
+
+
+class InvitedToChannelStep(scenario_step.ScenarioStep):
+
+ tags = [
+ scenario_step.ScenarioStep.TAG_TRIGGERED_BY_SYSTEM,
+ ]
+
+ def process_scenario(self, slack_user_identity, slack_team_identity, payload, action=None):
+ if payload["event"]["user"] == slack_team_identity.bot_user_id:
+ channel_id = payload["event"]["channel"]
+ slack_client = SlackClientWithErrorHandling(slack_team_identity.bot_access_token)
+ channel = slack_client.api_call("conversations.info", channel=channel_id)["channel"]
+
+ slack_team_identity.cached_channels.update_or_create(
+ slack_id=channel["id"],
+ defaults={
+ "name": channel["name"],
+ "is_archived": channel["is_archived"],
+ "is_shared": channel["is_shared"],
+ "last_populated": timezone.now().date(),
+ },
+ )
+ else:
+ logger.info("Other user was invited to a channel with a bot.")
+
+
+class CloseEphemeralButtonStep(scenario_step.ScenarioStep):
+
+ random_prefix_for_routing = "qwe2id"
+
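+    # Responding to a Slack interactive request with delete_original=True removes
+    # the ephemeral message that hosted the pressed button.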
+ def process_scenario(self, slack_user_identity, slack_team_identity, payload, action=None):
+ return JsonResponse({"response_type": "ephemeral", "delete_original": True})
+
+
+class CreateIncidentManuallyStep(scenario_step.ScenarioStep):
+ command_name = [settings.SLACK_SLASH_COMMAND_NAME]
+ tags = [
+ scenario_step.ScenarioStep.TAG_INCIDENT_ROUTINE,
+ ]
+
+ TITLE_INPUT_BLOCK_ID = "TITLE_INPUT"
+ MESSAGE_INPUT_BLOCK_ID = "MESSAGE_INPUT"
+
+ def process_scenario(self, slack_user_identity, slack_team_identity, payload, action=None):
+ try:
+ channel_id = payload["event"]["channel"]
+ except KeyError:
+ channel_id = payload["channel_id"]
+
+ blocks = self.get_create_incident_blocks(payload, slack_team_identity, slack_user_identity)
+
+ view = {
+ "type": "modal",
+ "callback_id": FinishCreateIncidentViewStep.routing_uid(),
+ "title": {
+ "type": "plain_text",
+ "text": "Create an Incident",
+ },
+ "close": {
+ "type": "plain_text",
+ "text": "Cancel",
+ "emoji": True,
+ },
+ "submit": {
+ "type": "plain_text",
+ "text": "Submit",
+ },
+ "blocks": blocks,
+ "private_metadata": json.dumps({"channel_id": channel_id}),
+ }
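+        # Slack echoes private_metadata back in the view_submission payload,
+        # which FinishCreateIncidentViewStep reads to recover the originating channel_id.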
+ self._slack_client.api_call(
+ "views.open",
+ trigger_id=payload["trigger_id"],
+ view=view,
+ )
+
+ def get_create_incident_blocks(self, payload, slack_team_identity, slack_user_identity):
+ blocks = []
+ organization_selection_block = self.get_select_organization_route_element(
+ slack_team_identity, slack_user_identity
+ )
+ title_incident_block = {
+ "type": "input",
+ "block_id": self.TITLE_INPUT_BLOCK_ID,
+ "label": {
+ "type": "plain_text",
+ "text": "Title:",
+ },
+ "element": {
+ "type": "plain_text_input",
+ "action_id": FinishCreateIncidentViewStep.routing_uid(),
+ "placeholder": {
+ "type": "plain_text",
+ "text": " ",
+ },
+ },
+ }
+ if payload.get("text", None) is not None:
+ title_incident_block["element"]["initial_value"] = payload["text"]
+ message_incident_block = {
+ "type": "input",
+ "block_id": self.MESSAGE_INPUT_BLOCK_ID,
+ "label": {
+ "type": "plain_text",
+ "text": "Message:",
+ },
+ "element": {
+ "type": "plain_text_input",
+ "action_id": FinishCreateIncidentViewStep.routing_uid(),
+ "multiline": True,
+ "placeholder": {
+ "type": "plain_text",
+ "text": " ",
+ },
+ },
+ "optional": True,
+ }
+ if payload.get("message", {}).get("text") is not None:
+ message_incident_block["element"]["initial_value"] = payload["message"]["text"]
+
+ blocks.append(organization_selection_block)
+ blocks.append(title_incident_block)
+ blocks.append(message_incident_block)
+ return blocks
+
+
+class FinishCreateIncidentViewStep(scenario_step.ScenarioStep):
+
+ tags = [
+ scenario_step.ScenarioStep.TAG_INCIDENT_ROUTINE,
+ ]
+
+ def process_scenario(self, slack_user_identity, slack_team_identity, payload, action=None):
+ AlertReceiveChannel = apps.get_model("alerts", "AlertReceiveChannel")
+ ChannelFilter = apps.get_model("alerts", "ChannelFilter")
+
+ Alert = apps.get_model("alerts", "Alert")
+ payload_values = payload["view"]["state"]["values"]
+ title = payload_values[CreateIncidentManuallyStep.TITLE_INPUT_BLOCK_ID][self.routing_uid()]["value"]
+ text = payload_values[CreateIncidentManuallyStep.MESSAGE_INPUT_BLOCK_ID][self.routing_uid()]["value"] or ""
+
+ private_metadata = json.loads(payload["view"]["private_metadata"])
+ # update private metadata in payload to use it in alert rendering
+ payload["view"]["private_metadata"] = private_metadata
+
+ channel_id = private_metadata["channel_id"]
+
+ alert_receive_channel = AlertReceiveChannel.get_or_create_manual_integration(
+ organization=self.organization,
+ integration=AlertReceiveChannel.INTEGRATION_MANUAL,
+ deleted_at=None,
+ defaults={"author": self.user},
+ )
+ try:
+ self._slack_client.api_call(
+ "chat.postEphemeral",
+ channel=channel_id,
+ user=slack_user_identity.slack_id,
+ text=":white_check_mark: Alert *{}* successfully submitted".format(title),
+ )
+ except SlackAPIException as e:
+ if e.response["error"] == "channel_not_found":
+ self._slack_client.api_call(
+ "chat.postEphemeral",
+ channel=slack_user_identity.im_channel_id,
+ user=slack_user_identity.slack_id,
+ text=":white_check_mark: Alert *{}* successfully submitted".format(title),
+ )
+ else:
+ raise e
+ user_verbal = self.user.get_user_verbal_for_team_for_slack()
+ channel_filter_pk = payload["view"]["state"]["values"][
+ scenario_step.ScenarioStep.SELECT_ORGANIZATION_AND_ROUTE_BLOCK_ID
+ ][scenario_step.ScenarioStep.SELECT_ORGANIZATION_AND_ROUTE_BLOCK_ID]["selected_option"]["value"].split("-")[1]
+ channel_filter = ChannelFilter.objects.get(pk=channel_filter_pk)
+ Alert.create(
+ title=title,
+ message="{} created by {}".format(
+ text,
+ user_verbal,
+ ),
+ image_url=None,
+ link_to_upstream_details=None,
+ alert_receive_channel=alert_receive_channel,
+ raw_request_data=payload,
+ integration_unique_data={
+ "created_by": user_verbal,
+ },
+ force_route_id=channel_filter.pk,
+ )
+
+
+class CreateIncidentSubmenuStep(scenario_step.ScenarioStep):
+ callback_id = [
+ "incident_create",
+ "incident_create_staging",
+ "incident_create_develop",
+ ]
+ tags = [
+ scenario_step.ScenarioStep.TAG_INCIDENT_ROUTINE,
+ ]
+
+ def process_scenario(self, slack_user_identity, slack_team_identity, payload, action=None):
+ try:
+ image_url = payload["message"]["files"][0]["permalink"]
+ except KeyError:
+ image_url = None
+ channel_id = payload["channel"]["id"]
+
+ private_metadata = {
+ "channel_id": channel_id,
+ "image_url": image_url,
+ "message": {
+ "user": payload["message"].get("user"),
+ "text": payload["message"].get("text"),
+ "ts": payload["message"].get("ts"),
+ },
+ }
+
+ organization_selection_block = self.get_select_organization_route_element(
+ slack_team_identity, slack_user_identity
+ )
+ view = {
+ "type": "modal",
+ "callback_id": FinishCreateIncidentSubmenuStep.routing_uid(),
+ "title": {
+ "type": "plain_text",
+ "text": "Create an Incident",
+ },
+ "close": {
+ "type": "plain_text",
+ "text": "Cancel",
+ "emoji": True,
+ },
+ "submit": {
+ "type": "plain_text",
+ "text": "Submit",
+ },
+ "blocks": [organization_selection_block],
+ "private_metadata": json.dumps(private_metadata),
+ }
+ self._slack_client.api_call(
+ "views.open",
+ trigger_id=payload["trigger_id"],
+ view=view,
+ )
+
+
+class FinishCreateIncidentSubmenuStep(scenario_step.ScenarioStep):
+
+ tags = [
+ scenario_step.ScenarioStep.TAG_INCIDENT_ROUTINE,
+ ]
+
+ def process_scenario(self, slack_user_identity, slack_team_identity, payload, action=None):
+ AlertReceiveChannel = apps.get_model("alerts", "AlertReceiveChannel")
+ Alert = apps.get_model("alerts", "Alert")
+
+ private_metadata = json.loads(payload["view"]["private_metadata"])
+ # update private metadata in payload to use it in alert rendering
+ payload["view"]["private_metadata"] = private_metadata
+
+ channel_id = private_metadata["channel_id"]
+ author = private_metadata["message"]["user"]
+
+ alert_receive_channel = AlertReceiveChannel.get_or_create_manual_integration(
+ organization=self.organization,
+ integration=AlertReceiveChannel.INTEGRATION_MANUAL,
+ deleted_at=None,
+ defaults={"author": self.user},
+ )
+
+ author_username = "Unknown"
+ if author:
+ try:
+ author_username = self._slack_client.api_call(
+ "users.info",
+ user=author,
+ )
+ author_username = author_username.get("user", {}).get("real_name", None)
+ except SlackAPIException:
+ pass
+ payload["view"]["private_metadata"]["author_username"] = author_username
+
+ try:
+ permalink = self._slack_client.api_call(
+ "chat.getPermalink",
+ channel=private_metadata["channel_id"],
+ message_ts=private_metadata["message"]["ts"],
+ )
+ permalink = permalink.get("permalink", None)
+ except SlackAPIException:
+ permalink = None
+
+ permalink = "<{}|Original message...>".format(permalink) if permalink is not None else ""
+ Alert.create(
+ title="Message from {}".format(author_username),
+ message="{}\n{}".format(private_metadata["message"]["text"], permalink),
+ image_url=private_metadata["image_url"],
+            # Link to the Slack message is omitted because it redirects to the browser
+ link_to_upstream_details=None,
+ alert_receive_channel=alert_receive_channel,
+ raw_request_data=payload,
+ integration_unique_data={"created_by": self.user.get_user_verbal_for_team_for_slack()},
+ )
+ try:
+ self._slack_client.api_call(
+ "chat.postEphemeral",
+ channel=channel_id,
+ user=slack_user_identity.slack_id,
+ text=":white_check_mark: Alert successfully submitted",
+ )
+ except SlackAPIException as e:
+ if e.response["error"] == "channel_not_found" or e.response["error"] == "user_not_in_channel":
+ self._slack_client.api_call(
+ "chat.postEphemeral",
+ channel=slack_user_identity.im_channel_id,
+ user=slack_user_identity.slack_id,
+ text=":white_check_mark: Alert successfully submitted",
+ )
+ else:
+ raise e
+
+
+class AddToResolutionoteStep(CheckAlertIsUnarchivedMixin, scenario_step.ScenarioStep):
+ callback_id = [
+ "add_resolution_note",
+ "add_resolution_note_staging",
+ "add_resolution_note_develop",
+ ]
+ tags = [
+ scenario_step.ScenarioStep.TAG_INCIDENT_ROUTINE,
+ ]
+
+ def process_scenario(self, slack_user_identity, slack_team_identity, payload, action=None):
+ SlackMessage = apps.get_model("slack", "SlackMessage")
+ ResolutionNoteSlackMessage = apps.get_model("alerts", "ResolutionNoteSlackMessage")
+ ResolutionNote = apps.get_model("alerts", "ResolutionNote")
+ SlackUserIdentity = apps.get_model("slack", "SlackUserIdentity")
+
+ try:
+ channel_id = payload["channel"]["id"]
+ except KeyError:
+ raise Exception("Channel was not found")
+
+ if self.organization and self.organization.general_log_channel_id is None:
+ try:
+ return self._slack_client.api_call(
+ "chat.postEphemeral",
+ channel=channel_id,
+ user=slack_user_identity.slack_id,
+ attachments=CreateIncidentSubmenuStep.finish_configuration_attachments(self.organization),
+ )
+ except SlackAPIException as e:
+ if e.response["error"] == "channel_not_found" or e.response["error"] == "user_not_in_channel":
+ return self._slack_client.api_call(
+ "chat.postEphemeral",
+ channel=slack_user_identity.im_channel_id,
+ user=slack_user_identity.slack_id,
+ attachments=CreateIncidentSubmenuStep.finish_configuration_attachments(self.organization),
+ )
+ else:
+ raise e
+
+ warning_text = "Unable to add this message to resolution note, this command works only in incident threads."
+
+ try:
+ slack_message = SlackMessage.objects.get(
+ slack_id=payload["message"]["thread_ts"],
+ _slack_team_identity=slack_team_identity,
+ channel_id=channel_id,
+ )
+ except KeyError:
+ self.open_warning_window(payload, warning_text)
+ return
+ except SlackMessage.DoesNotExist:
+ self.open_warning_window(payload, warning_text)
+ return
+
+ try:
+ alert_group = slack_message.get_alert_group()
+ except SlackMessage.alert.RelatedObjectDoesNotExist as e:
+ self.open_warning_window(payload, warning_text)
+            logger.warning(
+                f"Exception: tried to add message from thread to Resolution Note: "
+                f"Slack Team Identity pk: {self.slack_team_identity.pk}, "
+                f"Slack Message id: {slack_message.slack_id}"
+            )
+ raise e
+
+ if not self.check_alert_is_unarchived(slack_team_identity, payload, alert_group):
+ return
+
+ if payload["message"]["type"] == "message" and "user" in payload["message"]:
+ message_ts = payload["message_ts"]
+ thread_ts = payload["message"]["thread_ts"]
+
+ result = self._slack_client.api_call(
+ "chat.getPermalink",
+ channel=channel_id,
+ message_ts=message_ts,
+ )
+ permalink = None
+ if result["permalink"] is not None:
+ permalink = result["permalink"]
+
+ if payload["message"]["ts"] in [
+ message.ts
+ for message in alert_group.resolution_note_slack_messages.filter(added_to_resolution_note=True)
+ ]:
+ warning_text = "Unable to add the same message again."
+ self.open_warning_window(payload, warning_text)
+ return
+
+ elif len(payload["message"]["text"]) > 2900:
+ warning_text = (
+ "Unable to add the message to Resolution note: the message is too long ({}). "
+ "Max length - 2900 symbols.".format(len(payload["message"]["text"]))
+ )
+ self.open_warning_window(payload, warning_text)
+ return
+
+ else:
+ try:
+ resolution_note_slack_message = ResolutionNoteSlackMessage.objects.get(
+ ts=message_ts, thread_ts=thread_ts
+ )
+ except ResolutionNoteSlackMessage.DoesNotExist:
+ text = payload["message"]["text"]
+ text = text.replace("```", "")
+ slack_message = SlackMessage.objects.get(
+ slack_id=thread_ts,
+ _slack_team_identity=slack_team_identity,
+ channel_id=channel_id,
+ )
+ alert_group = slack_message.get_alert_group()
+ author_slack_user_identity = SlackUserIdentity.objects.get(
+ slack_id=payload["message"]["user"], slack_team_identity=slack_team_identity
+ )
+ author_user = self.organization.users.get(slack_user_identity=author_slack_user_identity)
+ resolution_note_slack_message = ResolutionNoteSlackMessage(
+ alert_group=alert_group,
+ user=author_user,
+ added_by_user=self.user,
+ text=text,
+ slack_channel_id=channel_id,
+ thread_ts=thread_ts,
+ ts=message_ts,
+ permalink=permalink,
+ )
+ resolution_note_slack_message.added_to_resolution_note = True
+ resolution_note_slack_message.save()
+ resolution_note = resolution_note_slack_message.get_resolution_note()
+ if resolution_note is None:
+ ResolutionNote(
+ alert_group=alert_group,
+ author=resolution_note_slack_message.user,
+ source=ResolutionNote.Source.SLACK,
+ resolution_note_slack_message=resolution_note_slack_message,
+ ).save()
+ else:
+ resolution_note.recreate()
+ alert_group.drop_cached_after_resolve_report_json()
+ alert_group.schedule_cache_for_web()
+ try:
+ self._slack_client.api_call(
+ "reactions.add",
+ channel=channel_id,
+ name="memo",
+ timestamp=resolution_note_slack_message.ts,
+ )
+ except SlackAPIException:
+ pass
+
+ self._update_slack_message(alert_group)
+ else:
+ warning_text = "Unable to add this message to resolution note."
+ self.open_warning_window(payload, warning_text)
+ return
+
+
+STEPS_ROUTING = [
+ {
+ "payload_type": scenario_step.PAYLOAD_TYPE_SLASH_COMMAND,
+ "command_name": CreateIncidentManuallyStep.command_name,
+ "step": CreateIncidentManuallyStep,
+ },
+ {
+ "payload_type": scenario_step.PAYLOAD_TYPE_EVENT_CALLBACK,
+ "event_type": scenario_step.EVENT_TYPE_MEMBER_JOINED_CHANNEL,
+ "step": InvitedToChannelStep,
+ },
+ {
+ "payload_type": scenario_step.PAYLOAD_TYPE_INTERACTIVE_MESSAGE,
+ "action_type": scenario_step.ACTION_TYPE_BUTTON,
+ "action_name": CloseEphemeralButtonStep.routing_uid(),
+ "step": CloseEphemeralButtonStep,
+ },
+ {
+ "payload_type": scenario_step.PAYLOAD_TYPE_VIEW_SUBMISSION,
+ "view_callback_id": FinishCreateIncidentViewStep.routing_uid(),
+ "step": FinishCreateIncidentViewStep,
+ },
+ {
+ "payload_type": scenario_step.PAYLOAD_TYPE_VIEW_SUBMISSION,
+ "view_callback_id": FinishCreateIncidentSubmenuStep.routing_uid(),
+ "step": FinishCreateIncidentSubmenuStep,
+ },
+ {
+ "payload_type": scenario_step.PAYLOAD_TYPE_MESSAGE_ACTION,
+ "message_action_callback_id": CreateIncidentSubmenuStep.callback_id,
+ "step": CreateIncidentSubmenuStep,
+ },
+ {
+ "payload_type": scenario_step.PAYLOAD_TYPE_MESSAGE_ACTION,
+ "message_action_callback_id": AddToResolutionoteStep.callback_id,
+ "step": AddToResolutionoteStep,
+ },
+]
diff --git a/engine/apps/slack/scenarios/resolution_note.py b/engine/apps/slack/scenarios/resolution_note.py
new file mode 100644
index 0000000000..a82ccb69a1
--- /dev/null
+++ b/engine/apps/slack/scenarios/resolution_note.py
@@ -0,0 +1,607 @@
+import json
+import logging
+from urllib.parse import urljoin
+
+from django.apps import apps
+from django.conf import settings
+from django.db.models import Q
+
+from apps.slack.scenarios import scenario_step
+from apps.slack.slack_client.exceptions import SlackAPIException
+
+from .step_mixins import CheckAlertIsUnarchivedMixin
+
+logger = logging.getLogger(__name__)
+
+
+class UpdateResolutionNoteStep(scenario_step.ScenarioStep):
+ def process_signal(self, alert_group, resolution_note):
+ if resolution_note.deleted_at:
+ self.remove_resolution_note_slack_message(resolution_note)
+ else:
+ self.post_or_update_resolution_note_in_thread(resolution_note)
+
+ self.update_alert_group_resolution_note_button(
+ alert_group=alert_group,
+ )
+
+ def remove_resolution_note_slack_message(self, resolution_note):
+ resolution_note_slack_message = resolution_note.resolution_note_slack_message
+ if resolution_note_slack_message is not None:
+ resolution_note_slack_message.added_to_resolution_note = False
+ resolution_note_slack_message.save(update_fields=["added_to_resolution_note"])
+ if resolution_note_slack_message.posted_by_bot:
+ try:
+ self._slack_client.api_call(
+ "chat.delete",
+ channel=resolution_note_slack_message.slack_channel_id,
+ ts=resolution_note_slack_message.ts,
+ )
+                except SlackAPIException as e:
+                    if e.response["error"] in (
+                        "channel_not_found",
+                        "message_not_found",
+                        "is_archived",
+                        "invalid_auth",
+                        "is_inactive",
+                    ):
+                        logger.warning(
+                            f"Unable to delete resolution note message in slack. "
+                            f"Slack team identity pk: {self.slack_team_identity.pk}.\n"
+                            f"Reason: '{e.response['error']}'"
+                        )
+                    else:
+                        raise e
+ else:
+ self.remove_resolution_note_reaction(resolution_note_slack_message)
+
+ def post_or_update_resolution_note_in_thread(self, resolution_note):
+ ResolutionNoteSlackMessage = apps.get_model("alerts", "ResolutionNoteSlackMessage")
+ resolution_note_slack_message = resolution_note.resolution_note_slack_message
+ alert_group = resolution_note.alert_group
+ alert_group_slack_message = alert_group.slack_message
+ blocks = self.get_resolution_note_blocks(resolution_note)
+
+ if resolution_note_slack_message is None:
+ try:
+ result = self._slack_client.api_call(
+ "chat.postMessage",
+ channel=alert_group_slack_message.channel_id,
+ thread_ts=alert_group_slack_message.slack_id,
+ text=resolution_note.text,
+ blocks=blocks,
+ )
+            except SlackAPIException as e:
+                if e.response["error"] in ("channel_not_found", "is_archived", "invalid_auth"):
+                    logger.warning(
+                        f"Unable to post resolution note message to slack. "
+                        f"Slack team identity pk: {self.slack_team_identity.pk}.\n"
+                        f"Reason: '{e.response['error']}'"
+                    )
+                else:
+                    raise e
+ else:
+ message_ts = result["message"]["ts"]
+ result_permalink = self._slack_client.api_call(
+ "chat.getPermalink",
+ channel=alert_group_slack_message.channel_id,
+ message_ts=message_ts,
+ )
+
+ resolution_note_slack_message = ResolutionNoteSlackMessage(
+ alert_group=alert_group,
+ user=resolution_note.author,
+ added_by_user=resolution_note.author,
+ text=resolution_note.text,
+ slack_channel_id=alert_group_slack_message.channel_id,
+ thread_ts=result["ts"],
+ ts=message_ts,
+ permalink=result_permalink["permalink"],
+ posted_by_bot=True,
+ added_to_resolution_note=True,
+ )
+ resolution_note_slack_message.save()
+ self.add_resolution_note_reaction(resolution_note_slack_message)
+
+ resolution_note.resolution_note_slack_message = resolution_note_slack_message
+ resolution_note.save(update_fields=["resolution_note_slack_message"])
+ elif resolution_note_slack_message.posted_by_bot:
+ try:
+ self._slack_client.api_call(
+ "chat.update",
+ channel=alert_group_slack_message.channel_id,
+ ts=resolution_note_slack_message.ts,
+ text=resolution_note_slack_message.text,
+ blocks=blocks,
+ )
+            except SlackAPIException as e:
+                if e.response["error"] in (
+                    "channel_not_found",
+                    "message_not_found",
+                    "invalid_auth",
+                    "is_inactive",
+                    "account_inactive",
+                ):
+                    logger.warning(
+                        f"Unable to update resolution note message in slack. "
+                        f"Slack team identity pk: {self.slack_team_identity.pk}.\n"
+                        f"Reason: '{e.response['error']}'"
+                    )
+                else:
+                    raise e
+ else:
+ resolution_note_slack_message.text = resolution_note.text
+ resolution_note_slack_message.save(update_fields=["text"])
+
+ def update_alert_group_resolution_note_button(self, alert_group):
+ if alert_group.slack_message is not None:
+ self._update_slack_message(alert_group)
+
+ def add_resolution_note_reaction(self, slack_thread_message):
+ try:
+ self._slack_client.api_call(
+ "reactions.add",
+ channel=slack_thread_message.slack_channel_id,
+ name="memo",
+ timestamp=slack_thread_message.ts,
+ )
+ except SlackAPIException as e:
+            logger.warning(e)
+
+ def remove_resolution_note_reaction(self, slack_thread_message):
+ try:
+ self._slack_client.api_call(
+ "reactions.remove",
+ channel=slack_thread_message.slack_channel_id,
+ name="memo",
+ timestamp=slack_thread_message.ts,
+ )
+ except SlackAPIException as e:
+            logger.warning(e)
+
+ def get_resolution_note_blocks(self, resolution_note):
+ blocks = []
+ author_verbal = resolution_note.author_verbal(mention=True)
+ resolution_note_text_block = {
+ "type": "section",
+ "text": {"type": "plain_text", "text": resolution_note.text, "emoji": True},
+ }
+ blocks.append(resolution_note_text_block)
+ context_block = {
+ "type": "context",
+ "elements": [
+ {
+ "type": "plain_text",
+ "text": f"{author_verbal} resolution note from {resolution_note.get_source_display()}.",
+ "emoji": True,
+ }
+ ],
+ }
+ blocks.append(context_block)
+ return blocks
+
+
+class ResolutionNoteModalStep(CheckAlertIsUnarchivedMixin, scenario_step.ScenarioStep):
+
+ tags = [
+ scenario_step.ScenarioStep.TAG_INCIDENT_ROUTINE,
+ ]
+
+ RESOLUTION_NOTE_TEXT_BLOCK_ID = "resolution_note_text"
+
+ def process_scenario(self, slack_user_identity, slack_team_identity, payload, action=None, data=None):
+ AlertGroup = apps.get_model("alerts", "AlertGroup")
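+        # `data` is supplied when this step is re-invoked from AddRemoveThreadMessageStep;
+        # otherwise the context comes from the JSON embedded in the pressed button's value.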
+ value = data or json.loads(payload["actions"][0]["value"])
+ resolution_note_window_action = value.get("resolution_note_window_action", "") or value.get("action_value", "")
+ alert_group_pk = value.get("alert_group_pk")
+ action_resolve = value.get("action_resolve", False)
+ channel_id = payload["channel"]["id"] if "channel" in payload else None
+ alert_group = AlertGroup.all_objects.get(pk=alert_group_pk)
+
+ if not self.check_alert_is_unarchived(slack_team_identity, payload, alert_group):
+ return
+
+ blocks = []
+
+ if channel_id:
+ members = slack_team_identity.get_conversation_members(self._slack_client, channel_id)
+ if slack_team_identity.bot_user_id not in members:
+ blocks.extend(self.get_invite_bot_tip_blocks(channel_id))
+
+ blocks.extend(
+ self.get_resolution_notes_blocks(
+ alert_group,
+ resolution_note_window_action,
+ action_resolve,
+ )
+ )
+
+ view = {
+ "blocks": blocks,
+ "type": "modal",
+ "title": {
+ "type": "plain_text",
+ "text": "Resolution notes",
+ },
+ "private_metadata": json.dumps(
+ {
+ "organization_id": self.organization.pk if self.organization else alert_group.organization.pk,
+ "alert_group_pk": alert_group_pk,
+ }
+ ),
+ }
+
+ if "update" in resolution_note_window_action:
+ self._slack_client.api_call(
+ "views.update",
+ trigger_id=payload["trigger_id"],
+ view=view,
+ view_id=payload["view"]["id"],
+ )
+ else:
+ self._slack_client.api_call(
+ "views.open",
+ trigger_id=payload["trigger_id"],
+ view=view,
+ )
+
+ def get_resolution_notes_blocks(self, alert_group, resolution_note_window_action, action_resolve):
+ ResolutionNote = apps.get_model("alerts", "ResolutionNote")
+ blocks = []
+
+ other_resolution_notes = alert_group.resolution_notes.filter(~Q(source=ResolutionNote.Source.SLACK))
+ resolution_note_slack_messages = alert_group.resolution_note_slack_messages.filter(posted_by_bot=False)
+ if action_resolve:
+ blocks.extend(
+ [
+ {
+ "type": "divider",
+ },
+ {
+ "type": "section",
+ "text": {
+ "type": "mrkdwn",
+ "text": ":warning: You cannot resolve this incident without resolution note.",
+ },
+ },
+ ]
+ )
+
+ if "error" in resolution_note_window_action:
+ blocks.extend(
+ [
+ {
+ "type": "divider",
+ },
+ {
+ "type": "section",
+ "text": {
+ "type": "mrkdwn",
+ "text": ":warning: _Oops! You cannot remove this message from resolution notes when incident is "
+ "resolved. Reason: `resolution note is required` setting. Add another message at first._ ",
+ },
+ },
+ ]
+ )
+
+ for message in resolution_note_slack_messages:
+ user_verbal = message.user.get_user_verbal_for_team_for_slack(mention=True)
+ blocks.append(
+ {
+ "type": "divider",
+ }
+ )
+ message_block = {
+ "type": "section",
+ "text": {
+ "type": "mrkdwn",
+ "text": "{} \n{}".format(
+ user_verbal,
+ float(message.ts),
+ message.text,
+ ),
+ },
+ "accessory": {
+ "type": "button",
+ "style": "primary" if not message.added_to_resolution_note else "danger",
+ "text": {
+ "type": "plain_text",
+ "text": "Add" if not message.added_to_resolution_note else "Remove",
+ "emoji": True,
+ },
+ "action_id": AddRemoveThreadMessageStep.routing_uid(),
+ "value": json.dumps(
+ {
+ "resolution_note_window_action": "edit",
+ "msg_value": "add" if not message.added_to_resolution_note else "remove",
+ "message_pk": message.pk,
+ "resolution_note_pk": None,
+ "alert_group_pk": alert_group.pk,
+ }
+ ),
+ },
+ }
+ blocks.append(message_block)
+
+ if other_resolution_notes:
+ blocks.extend(
+ [
+ {
+ "type": "divider",
+ },
+ {
+ "type": "section",
+ "text": {
+ "type": "mrkdwn",
+ "text": "*Resolution notes from other sources:*",
+ },
+ },
+ ]
+ )
+ for resolution_note in other_resolution_notes:
+ resolution_note_slack_message = resolution_note.resolution_note_slack_message
+ user_verbal = resolution_note.author_verbal(mention=True)
+ blocks.append(
+ {
+ "type": "divider",
+ }
+ )
+ source = "web" if resolution_note.source == ResolutionNote.Source.WEB else "slack"
+ message_block = {
+ "type": "section",
+ "text": {
+ "type": "mrkdwn",
+ "text": "{} (from {})\n{}".format(
+ user_verbal,
+ float(message_timestamp),
+ source,
+ resolution_note.message_text,
+ ),
+ },
+ "accessory": {
+ "type": "button",
+ "style": "danger",
+ "text": {
+ "type": "plain_text",
+ "text": "Remove",
+ "emoji": True,
+ },
+ "action_id": AddRemoveThreadMessageStep.routing_uid(),
+ "value": json.dumps(
+ {
+ "resolution_note_window_action": "edit",
+ "msg_value": "remove",
+ "message_pk": None
+ if not resolution_note_slack_message
+ else resolution_note_slack_message.pk,
+ "resolution_note_pk": resolution_note.pk,
+ "alert_group_pk": alert_group.pk,
+ }
+ ),
+ "confirm": {
+ "title": {"type": "plain_text", "text": "Are you sure?"},
+ "text": {
+ "type": "mrkdwn",
+ "text": "This operation will permanently delete this Resolution Note.",
+ },
+ "confirm": {"type": "plain_text", "text": "Delete"},
+ "deny": {
+ "type": "plain_text",
+ "text": "Stop, I've changed my mind!",
+ },
+ "style": "danger",
+ },
+ },
+ }
+
+ blocks.append(message_block)
+
+ if not blocks:
+ # there aren't any resolution notes yet, display a hint instead
+ link_to_instruction = urljoin(settings.BASE_URL, "static/images/postmortem.gif")
+ blocks = [
+ {
+ "type": "divider",
+ },
+ {
+ "type": "section",
+ "text": {
+ "type": "mrkdwn",
+ "text": ":bulb: You can add a message to the resolution notes via context menu:",
+ },
+ },
+ {
+ "type": "image",
+ "title": {
+ "type": "plain_text",
+ "text": "Add a resolution note",
+ },
+ "image_url": link_to_instruction,
+ "alt_text": "Add to postmortem context menu",
+ },
+ ]
+
+ return blocks
+
+ def get_invite_bot_tip_blocks(self, channel):
+ link_to_instruction = urljoin(settings.BASE_URL, "static/images/postmortem.gif")
+ blocks = [
+ {
+ "type": "divider",
+ },
+ {
+ "type": "context",
+ "elements": [
+ {
+ "type": "mrkdwn",
+ "text": f":bulb: To include messages from thread to resolution note `/invite` Grafana OnCall to "
+ f"<#{channel}>. Or you can add a message via "
+ f"<{link_to_instruction}|context menu>.",
+ },
+ ],
+ },
+ ]
+ return blocks
+
+
+class ReadEditPostmortemStep(ResolutionNoteModalStep):
+    # Kept for backward compatibility with Slack messages created before the postmortems -> resolution notes rename
+ pass
+
+
+class AddRemoveThreadMessageStep(UpdateResolutionNoteStep, scenario_step.ScenarioStep):
+
+ tags = [
+ scenario_step.ScenarioStep.TAG_INCIDENT_ROUTINE,
+ ]
+
+ def process_scenario(self, slack_user_identity, slack_team_identity, payload, action=None):
+ AlertGroup = apps.get_model("alerts", "AlertGroup")
+ ResolutionNoteSlackMessage = apps.get_model("alerts", "ResolutionNoteSlackMessage")
+ ResolutionNote = apps.get_model("alerts", "ResolutionNote")
+ value = json.loads(payload["actions"][0]["value"])
+ slack_message_pk = value.get("message_pk")
+ resolution_note_pk = value.get("resolution_note_pk")
+ alert_group_pk = value.get("alert_group_pk")
+        add_to_resolution_note = value["msg_value"].startswith("add")
+ slack_thread_message = None
+ resolution_note = None
+ drop_ag_cache = False
+
+ alert_group = AlertGroup.all_objects.get(pk=alert_group_pk)
+
+ if slack_message_pk is not None:
+ slack_thread_message = ResolutionNoteSlackMessage.objects.get(pk=slack_message_pk)
+ resolution_note = slack_thread_message.get_resolution_note()
+
+ if add_to_resolution_note and slack_thread_message is not None:
+ slack_thread_message.added_to_resolution_note = True
+ slack_thread_message.save(update_fields=["added_to_resolution_note"])
+ if resolution_note is None:
+ ResolutionNote(
+ alert_group=alert_group,
+ author=slack_thread_message.user,
+ source=ResolutionNote.Source.SLACK,
+ resolution_note_slack_message=slack_thread_message,
+ ).save()
+ else:
+ resolution_note.recreate()
+ self.add_resolution_note_reaction(slack_thread_message)
+ drop_ag_cache = True
+ elif not add_to_resolution_note:
+ # Check if resolution_note can be removed
+ if (
+ self.organization.is_resolution_note_required
+ and alert_group.resolved
+ and alert_group.resolution_notes.count() == 1
+ ):
+ # Show error message
+ resolution_note_data = json.loads(payload["actions"][0]["value"])
+ resolution_note_data["resolution_note_window_action"] = "edit_update_error"
+ return ResolutionNoteModalStep(slack_team_identity).process_scenario(
+ slack_user_identity,
+ slack_team_identity,
+ payload,
+ data=resolution_note_data,
+ )
+ else:
+ if resolution_note_pk is not None and resolution_note is None: # old version of step
+ resolution_note = ResolutionNote.objects.get(pk=resolution_note_pk)
+ resolution_note.delete()
+ if slack_thread_message:
+ slack_thread_message.added_to_resolution_note = False
+ slack_thread_message.save(update_fields=["added_to_resolution_note"])
+ self.remove_resolution_note_reaction(slack_thread_message)
+ drop_ag_cache = True
+ self.update_alert_group_resolution_note_button(
+ alert_group,
+ )
+ if drop_ag_cache:
+ alert_group.drop_cached_after_resolve_report_json()
+ alert_group.schedule_cache_for_web()
+ resolution_note_data = json.loads(payload["actions"][0]["value"])
+ resolution_note_data["resolution_note_window_action"] = "edit_update"
+ ResolutionNoteModalStep(slack_team_identity, self.organization, self.user).process_scenario(
+ slack_user_identity,
+ slack_team_identity,
+ payload,
+ data=resolution_note_data,
+ )
+
+
+STEPS_ROUTING = [
+ {
+ "payload_type": scenario_step.PAYLOAD_TYPE_BLOCK_ACTIONS,
+ "block_action_type": scenario_step.BLOCK_ACTION_TYPE_BUTTON,
+ "block_action_id": ReadEditPostmortemStep.routing_uid(),
+ "step": ReadEditPostmortemStep,
+ },
+ {
+ "payload_type": scenario_step.PAYLOAD_TYPE_BLOCK_ACTIONS,
+ "block_action_type": scenario_step.BLOCK_ACTION_TYPE_BUTTON,
+ "block_action_id": ResolutionNoteModalStep.routing_uid(),
+ "step": ResolutionNoteModalStep,
+ },
+ {
+ "payload_type": scenario_step.PAYLOAD_TYPE_INTERACTIVE_MESSAGE,
+ "action_type": scenario_step.ACTION_TYPE_BUTTON,
+ "action_name": ResolutionNoteModalStep.routing_uid(),
+ "step": ResolutionNoteModalStep,
+ },
+ {
+ "payload_type": scenario_step.PAYLOAD_TYPE_BLOCK_ACTIONS,
+ "block_action_type": scenario_step.BLOCK_ACTION_TYPE_BUTTON,
+ "block_action_id": AddRemoveThreadMessageStep.routing_uid(),
+ "step": AddRemoveThreadMessageStep,
+ },
+]
diff --git a/engine/apps/slack/scenarios/scenario_step.py b/engine/apps/slack/scenarios/scenario_step.py
new file mode 100644
index 0000000000..f2b51173bb
--- /dev/null
+++ b/engine/apps/slack/scenarios/scenario_step.py
@@ -0,0 +1,485 @@
+import importlib
+import json
+import logging
+
+from django.apps import apps
+from django.core.cache import cache
+
+from apps.slack.constants import SLACK_RATE_LIMIT_DELAY
+from apps.slack.slack_client import SlackClientWithErrorHandling
+from apps.slack.slack_client.exceptions import (
+ SlackAPIChannelArchivedException,
+ SlackAPIException,
+ SlackAPIRateLimitException,
+ SlackAPITokenException,
+)
+from common.constants.role import Role
+
+logger = logging.getLogger(__name__)
+
+
+PAYLOAD_TYPE_INTERACTIVE_MESSAGE = "interactive_message"
+ACTION_TYPE_BUTTON = "button"
+ACTION_TYPE_SELECT = "select"
+
+PAYLOAD_TYPE_SLASH_COMMAND = "slash_command"
+
+PAYLOAD_TYPE_EVENT_CALLBACK = "event_callback"
+EVENT_TYPE_MESSAGE = "message"
+EVENT_TYPE_MESSAGE_CHANNEL = "channel"
+EVENT_TYPE_MESSAGE_IM = "im"
+EVENT_TYPE_USER_CHANGE = "user_change"
+EVENT_TYPE_APP_MENTION = "app_mention"
+EVENT_TYPE_MEMBER_JOINED_CHANNEL = "member_joined_channel"
+EVENT_TYPE_IM_OPEN = "im_open"
+EVENT_TYPE_APP_HOME_OPENED = "app_home_opened"
+EVENT_TYPE_SUBTEAM_CREATED = "subteam_created"
+EVENT_TYPE_SUBTEAM_UPDATED = "subteam_updated"
+EVENT_TYPE_SUBTEAM_MEMBERS_CHANGED = "subteam_members_changed"
+EVENT_SUBTYPE_MESSAGE_CHANGED = "message_changed"
+EVENT_SUBTYPE_MESSAGE_DELETED = "message_deleted"
+EVENT_SUBTYPE_BOT_MESSAGE = "bot_message"
+EVENT_SUBTYPE_THREAD_BROADCAST = "thread_broadcast"
+EVENT_SUBTYPE_FILE_SHARE = "file_share"
+EVENT_TYPE_CHANNEL_DELETED = "channel_deleted"
+EVENT_TYPE_CHANNEL_CREATED = "channel_created"
+EVENT_TYPE_CHANNEL_RENAMED = "channel_rename"
+EVENT_TYPE_CHANNEL_ARCHIVED = "channel_archive"
+EVENT_TYPE_CHANNEL_UNARCHIVED = "channel_unarchive"
+
+PAYLOAD_TYPE_BLOCK_ACTIONS = "block_actions"
+BLOCK_ACTION_TYPE_USERS_SELECT = "users_select"
+BLOCK_ACTION_TYPE_BUTTON = "button"
+BLOCK_ACTION_TYPE_STATIC_SELECT = "static_select"
+BLOCK_ACTION_TYPE_CONVERSATIONS_SELECT = "conversations_select"
+BLOCK_ACTION_TYPE_CHANNELS_SELECT = "channels_select"
+BLOCK_ACTION_TYPE_OVERFLOW = "overflow"
+BLOCK_ACTION_TYPE_DATEPICKER = "datepicker"
+
+PAYLOAD_TYPE_DIALOG_SUBMISSION = "dialog_submission"
+PAYLOAD_TYPE_VIEW_SUBMISSION = "view_submission"
+
+PAYLOAD_TYPE_MESSAGE_ACTION = "message_action"
+
+THREAD_MESSAGE_SUBTYPE = "bot_message"
+
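+# Slack caps static_select menus at 100 options; get_select_user_element splits longer
+# user lists into option groups to stay under this limit.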
+MAX_STATIC_SELECT_OPTIONS = 100
+
+
+class ScenarioStep(object):
+
+    # Delay to prevent intermediate system activity while a user performs a multi-step action.
+    # For example, if a user un-acks and then re-acks, we don't need to launch escalation right after the un-ack.
+ CROSS_ACTION_DELAY = 10
+ SELECT_ORGANIZATION_AND_ROUTE_BLOCK_ID = "SELECT_ORGANIZATION_AND_ROUTE"
+
+ need_to_be_logged = True
+ random_prefix_for_routing = ""
+
+    # Some blocks send context via action_id, which is limited to 255 characters
+
+ TAG_ONBOARDING = "onboarding"
+ TAG_DASHBOARD = "dashboard"
+ TAG_SUBSCRIPTION = "subscription"
+ TAG_REPORTING = "reporting"
+
+ TAG_TEAM_SETTINGS = "team_settings"
+
+ TAG_TRIGGERED_BY_SYSTEM = "triggered_by_system"
+ TAG_INCIDENT_ROUTINE = "incident_routine"
+ TAG_INCIDENT_MANAGEMENT = "incident_management"
+
+ TAG_ON_CALL_SCHEDULES = "on_call_schedules"
+
+ tags = []
+
+ def __init__(self, slack_team_identity, organization=None, user=None):
+ self._slack_client = SlackClientWithErrorHandling(slack_team_identity.bot_access_token)
+ self.slack_team_identity = slack_team_identity
+ self.organization = organization
+ self.user = user
+
+ cache_tag = "step_tags_populated_{}".format(self.routing_uid())
+
+ if cache.get(cache_tag) is None:
+ cache.set(cache_tag, 1, 180)
+
+ def dispatch(self, user, team, payload, action=None):
+ return self.process_scenario(user, team, payload, action)
+
+ def process_scenario(self, user, team, payload, action=None):
+ pass
+
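+    # The message ts may arrive in several payload shapes: top-level "message_ts"
+    # (interactive messages), the modal's private_metadata, the block-actions
+    # "container", or legacy "state"; "random" is the fallback when none is present.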
+ def ts(self, payload):
+ if "message_ts" in payload:
+ ts = payload["message_ts"]
+ elif (
+ "view" in payload
+ and "private_metadata" in payload["view"]
+ and payload["view"]["private_metadata"]
+ and "ts" in json.loads(payload["view"]["private_metadata"])
+ ):
+ ts = json.loads(payload["view"]["private_metadata"])["ts"]
+ elif "container" in payload and "message_ts" in payload["container"]:
+ ts = payload["container"]["message_ts"]
+ elif "state" in payload and "message_ts" in json.loads(payload["state"]):
+ ts = json.loads(payload["state"])["message_ts"]
+ else:
+ ts = "random"
+ return ts
+
+ def channel(self, user, payload):
+ if "channel" in payload and "id" in payload["channel"]:
+ channel = payload["channel"]["id"]
+ else:
+ channel = user.im_channel_id
+ return channel
+
+ @staticmethod
+ def finish_configuration_attachments(organization):
+ text = (
+ f"A few steps left to finish configuration!\n"
+ f"Go to your <{organization.web_link}?page=slack|OnCall workspace> and select default channel "
+ f"for your incidents!"
+ )
+ return [
+ {
+ "color": "#008000",
+ "blocks": [
+ {
+ "type": "section",
+ "text": {
+ "type": "mrkdwn",
+ "text": text,
+ },
+ }
+ ],
+ }
+ ]
+
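+    # routing_uid() yields a stable per-class identifier used as a Slack action_id /
+    # callback_id, e.g. CloseEphemeralButtonStep (prefix "qwe2id") routes as
+    # "qwe2idCloseEphemeralButtonStep".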
+ @classmethod
+ def routing_uid(cls):
+ return cls.random_prefix_for_routing + cls.__name__
+
+ @classmethod
+ def get_step(cls, scenario, step):
+ """
+ This is a dynamic Step loader to avoid circular dependencies in scenario files
+ """
+        # In case circular dependencies become an issue again, this may help:
+        # https://stackoverflow.com/posts/36442015/revisions
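+        # e.g. ScenarioStep.get_step("resolution_note", "UpdateResolutionNoteStep")
+        # imports apps.slack.scenarios.resolution_note and returns that class.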
+ try:
+ module = importlib.import_module("apps.slack.scenarios." + scenario)
+ step = getattr(module, step)
+ return step
+ except ImportError as e:
+ raise Exception("Check import spelling! Scenario: {}, Step:{}, Error: {}".format(scenario, step, e))
+
+    def process_scenario_from_other_step(
+        self, slack_user_identity, slack_team_identity, payload, step_class, action=None, kwargs=None
+    ):
+        """
+        Allows triggering another step from the current step.
+        """
+        step = step_class(slack_team_identity)
+        step.process_scenario(slack_user_identity, slack_team_identity, payload, action=action, **(kwargs or {}))
+
+ def get_permission_denied_prompt(self):
+ current_role = self.user.get_role_display()
+ admins_queryset = self.organization.users.filter(role=Role.ADMIN).select_related("slack_user_identity")
+ admins_verbal = "No admins"
+ if admins_queryset.count() > 0:
+ admins_verbal = ", ".join(["<@{}>".format(admin.slack_user_identity.slack_id) for admin in admins_queryset])
+
+ return current_role, admins_verbal
+
+ def open_warning_window(self, payload, warning_text, title=None):
+ if title is None:
+ title = ":warning: Warning"
+ view = {
+ "type": "modal",
+ "callback_id": "warning",
+ "title": {
+ "type": "plain_text",
+ "text": title,
+ },
+ "close": {
+ "type": "plain_text",
+ "text": "Ok",
+ "emoji": True,
+ },
+ "blocks": [
+ {
+ "type": "section",
+ "text": {
+ "type": "mrkdwn",
+ "text": warning_text,
+ },
+ },
+ ],
+ }
+ self._slack_client.api_call(
+ "views.open",
+ trigger_id=payload["trigger_id"],
+ view=view,
+ )
+
+ def get_alert_group_from_slack_message(self, payload):
+ SlackMessage = apps.get_model("slack", "SlackMessage")
+
+ message_ts = payload.get("message_ts") or payload["container"]["message_ts"] # interactive message or block
+ channel_id = payload["channel"]["id"]
+
+ try:
+ slack_message = SlackMessage.objects.get(
+ slack_id=message_ts,
+ _slack_team_identity=self.slack_team_identity,
+ channel_id=channel_id,
+ )
+ alert_group = slack_message.get_alert_group()
+        except SlackMessage.DoesNotExist as e:
+            logger.warning(
+                f"Tried to get SlackMessage from message_ts: "
+                f"Slack Team Identity pk: {self.slack_team_identity.pk}, "
+                f"Message ts: {message_ts}"
+            )
+            raise e
+        except SlackMessage.alert.RelatedObjectDoesNotExist as e:
+            logger.warning(
+                f"Tried to get Alert Group from SlackMessage: "
+                f"Slack Team Identity pk: {self.slack_team_identity.pk}, "
+                f"SlackMessage pk: {slack_message.pk}"
+            )
+            raise e
+ return alert_group
+
+ def _update_slack_message(self, alert_group):
+ logger.info(f"Started _update_slack_message for alert_group {alert_group.pk}")
+ SlackMessage = apps.get_model("slack", "SlackMessage")
+ AlertReceiveChannel = apps.get_model("alerts", "AlertReceiveChannel")
+
+ slack_message = alert_group.slack_message
+ attachments = alert_group.render_slack_attachments()
+ blocks = alert_group.render_slack_blocks()
+ logger.info(f"Update message for alert_group {alert_group.pk}")
+ try:
+ self._slack_client.api_call(
+ "chat.update",
+ channel=slack_message.channel_id,
+ ts=slack_message.slack_id,
+ attachments=attachments,
+ blocks=blocks,
+ )
+ logger.info(f"Message has been updated for alert_group {alert_group.pk}")
+ except SlackAPIRateLimitException as e:
+ if alert_group.channel.integration != AlertReceiveChannel.INTEGRATION_MAINTENANCE:
+ if not alert_group.channel.is_rate_limited_in_slack:
+ delay = e.response.get("rate_limit_delay") or SLACK_RATE_LIMIT_DELAY
+ alert_group.channel.start_send_rate_limit_message_task(delay)
+ logger.info(
+ f"Message has not been updated for alert_group {alert_group.pk} due to slack rate limit."
+ )
+ else:
+ raise e
+
+ except SlackAPIException as e:
+ if e.response["error"] == "message_not_found":
+ logger.info(f"message_not_found for alert_group {alert_group.pk}, trying to post new message")
+ result = self._slack_client.api_call(
+ "chat.postMessage", channel=slack_message.channel_id, attachments=attachments, blocks=blocks
+ )
+ slack_message_updated = SlackMessage(
+ slack_id=result["ts"],
+ organization=slack_message.organization,
+ _slack_team_identity=slack_message.slack_team_identity,
+ channel_id=slack_message.channel_id,
+ alert_group=alert_group,
+ )
+ slack_message_updated.save()
+ alert_group.slack_message = slack_message_updated
+ alert_group.save(update_fields=["slack_message"])
+ logger.info(f"Message has been posted for alert_group {alert_group.pk}")
+ elif e.response["error"] == "is_inactive": # deleted channel error
+ logger.info(f"Skip updating slack message for alert_group {alert_group.pk} due to is_inactive")
+ elif e.response["error"] == "account_inactive":
+ logger.info(f"Skip updating slack message for alert_group {alert_group.pk} due to account_inactive")
+ elif e.response["error"] == "channel_not_found":
+ logger.info(f"Skip updating slack message for alert_group {alert_group.pk} due to channel_not_found")
+ else:
+ raise e
+ logger.info(f"Finished _update_slack_message for alert_group {alert_group.pk}")
+
+ def _publish_message_to_thread(self, alert_group, attachments, mrkdwn=True, unfurl_links=True):
+ # TODO: refactor checking the possibility of sending message to slack
+ # do not try to post message to slack if integration is rate limited
+ if alert_group.channel.is_rate_limited_in_slack:
+ return
+
+ SlackMessage = apps.get_model("slack", "SlackMessage")
+ slack_message = alert_group.get_slack_message()
+ channel_id = slack_message.channel_id
+ try:
+ result = self._slack_client.api_call(
+ "chat.postMessage",
+ channel=channel_id,
+ attachments=attachments,
+ thread_ts=slack_message.slack_id,
+ mrkdwn=mrkdwn,
+ unfurl_links=unfurl_links,
+ )
+ except SlackAPITokenException as e:
+ logger.warning(
+ f"Unable to post message to thread in slack. "
+ f"Slack team identity pk: {self.slack_team_identity.pk}.\n"
+ f"{e}"
+ )
+ except SlackAPIChannelArchivedException:
+ logger.warning(
+ f"Unable to post message to thread in slack. "
+ f"Slack team identity pk: {self.slack_team_identity.pk}.\n"
+ f"Reason: 'is_archived'"
+ )
+ except SlackAPIException as e:
+ if e.response["error"] == "channel_not_found": # channel was deleted
+ logger.warning(
+ f"Unable to post message to thread in slack. "
+ f"Slack team identity pk: {self.slack_team_identity.pk}.\n"
+ f"Reason: 'channel_not_found'"
+ )
+ elif e.response["error"] == "invalid_auth":
+ logger.warning(
+ f"Unable to post message to thread in slack. "
+ f"Slack team identity pk: {self.slack_team_identity.pk}.\n"
+ f"Reason: 'invalid_auth'"
+ )
+ else:
+ raise e
+ else:
+ SlackMessage(
+ slack_id=result["ts"],
+ organization=alert_group.channel.organization,
+ _slack_team_identity=self.slack_team_identity,
+ channel_id=channel_id,
+ alert_group=alert_group,
+ ).save()
+
+ def get_select_user_element(
+ self, action_id, multi_select=False, initial_user=None, initial_users_list=None, text=None
+ ):
+ if not text:
+ text = f"Select User{'' if not multi_select else 's'}"
+ element = {
+ "action_id": action_id,
+ "type": "static_select" if not multi_select else "multi_static_select",
+ "placeholder": {
+ "type": "plain_text",
+ "text": text,
+ "emoji": True,
+ },
+ }
+
+ users = self.organization.users.all().select_related("slack_user_identity")
+
+ users_count = users.count()
+ options = []
+
+ for user in users:
+ user_verbal = f"{user.get_user_verbal_for_team_for_slack()}"
+ if len(user_verbal) > 75:
+ user_verbal = user_verbal[:72] + "..."
+ option = {"text": {"type": "plain_text", "text": user_verbal}, "value": json.dumps({"user_id": user.pk})}
+ options.append(option)
+
+ if users_count > MAX_STATIC_SELECT_OPTIONS:
+ option_groups = []
+ option_groups_chunks = [
+ options[x : x + MAX_STATIC_SELECT_OPTIONS] for x in range(0, len(options), MAX_STATIC_SELECT_OPTIONS)
+ ]
+ for option_group in option_groups_chunks:
+ option_group = {"label": {"type": "plain_text", "text": " "}, "options": option_group}
+ option_groups.append(option_group)
+ element["option_groups"] = option_groups
+ elif users_count == 0: # strange case when there are no users to select
+ option = {
+ "text": {"type": "plain_text", "text": "No users to select"},
+ "value": json.dumps({"user_id": None}),
+ }
+ options.append(option)
+ element["options"] = options
+ return element
+ else:
+ element["options"] = options
+
+ # add initial option
+ if multi_select and initial_users_list:
+ if users_count <= MAX_STATIC_SELECT_OPTIONS:
+ initial_options = []
+                for user in initial_users_list:
+ user_verbal = f"{user.get_user_verbal_for_team_for_slack()}"
+ option = {
+ "text": {"type": "plain_text", "text": user_verbal},
+ "value": json.dumps({"user_id": user.pk}),
+ }
+ initial_options.append(option)
+ element["initial_options"] = initial_options
+ elif not multi_select and initial_user:
+ user_verbal = f"{initial_user.get_user_verbal_for_team_for_slack()}"
+ initial_option = {
+ "text": {"type": "plain_text", "text": user_verbal},
+ "value": json.dumps({"user_id": initial_user.pk}),
+ }
+ element["initial_option"] = initial_option
+
+ return element
+
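+    # Illustrative only: the element built above is typically embedded in a
+    # Slack block as the accessory of a section, e.g.
+    #
+    #     {
+    #         "type": "section",
+    #         "text": {"type": "mrkdwn", "text": "Assign to:"},
+    #         "accessory": self.get_select_user_element("example_action_id"),
+    #     }
+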
+ def get_select_organization_route_element(self, slack_team_identity, slack_user_identity):
+ AlertReceiveChannel = apps.get_model("alerts", "AlertReceiveChannel")
+
+ organizations = slack_team_identity.organizations.filter(
+ users__slack_user_identity=slack_user_identity
+ ).distinct()
+ organizations_options = []
+
+ for organization in organizations:
+ manual_integration = AlertReceiveChannel.get_or_create_manual_integration(
+ organization=organization,
+ integration=AlertReceiveChannel.INTEGRATION_MANUAL,
+ deleted_at=None,
+ defaults={"author": self.user},
+ )
+
+ for route in manual_integration.channel_filters.all():
+ filtering_term = f'"{route.filtering_term}"'
+ if route.is_default:
+ filtering_term = "default"
+ organizations_options.append(
+ {
+ "text": {
+ "type": "plain_text",
+ "text": f"{organization.org_title}: {filtering_term}",
+ "emoji": True,
+ },
+ "value": f"{organization.pk}-{route.pk}",
+ }
+ )
+
+ organization_selection_block = {
+ "type": "input",
+ "block_id": ScenarioStep.SELECT_ORGANIZATION_AND_ROUTE_BLOCK_ID,
+ "element": {
+ "type": "static_select",
+ "placeholder": {
+ "type": "plain_text",
+ "text": "Select organization",
+ },
+ "action_id": ScenarioStep.SELECT_ORGANIZATION_AND_ROUTE_BLOCK_ID,
+ "options": organizations_options,
+ "initial_option": organizations_options[0],
+ },
+ "label": {
+ "type": "plain_text",
+ "text": "Select organization and route:",
+ "emoji": True,
+ },
+ }
+ return organization_selection_block
diff --git a/engine/apps/slack/scenarios/schedules.py b/engine/apps/slack/scenarios/schedules.py
new file mode 100644
index 0000000000..9c5155a49a
--- /dev/null
+++ b/engine/apps/slack/scenarios/schedules.py
@@ -0,0 +1,330 @@
+import json
+
+import pytz
+from django.utils import timezone
+
+from apps.schedules.models import OnCallSchedule
+from apps.slack.scenarios import scenario_step
+from apps.slack.utils import format_datetime_to_slack
+from apps.user_management.organization_log_creator import OrganizationLogType, create_organization_log
+
+
+class EditScheduleShiftNotifyStep(scenario_step.ScenarioStep):
+ tags = [scenario_step.ScenarioStep.TAG_ON_CALL_SCHEDULES]
+
+ notify_empty_oncall_options = {choice[0]: choice[1] for choice in OnCallSchedule.NotifyEmptyOnCall.choices}
+ notify_oncall_shift_freq_options = {choice[0]: choice[1] for choice in OnCallSchedule.NotifyOnCallShiftFreq.choices}
+ mention_oncall_start_options = {1: "Mention person in slack", 0: "Inform in channel without mention"}
+ mention_oncall_next_options = {1: "Mention person in slack", 0: "Inform in channel without mention"}
+
+ def process_scenario(self, slack_user_identity, slack_team_identity, payload, action=None):
+ if payload["actions"][0].get("value", None) and payload["actions"][0]["value"].startswith("edit"):
+ self.open_settings_modal(payload)
+ elif payload["actions"][0].get("type", None) and payload["actions"][0]["type"] == "static_select":
+ self.set_selected_value(slack_user_identity, payload)
+
+ def open_settings_modal(self, payload, schedule_id=None):
+ schedule_id = payload["actions"][0]["value"].split("_")[1] if schedule_id is None else schedule_id
+ try:
+ _ = OnCallSchedule.objects.get(pk=schedule_id) # noqa
+ except OnCallSchedule.DoesNotExist:
+ blocks = [{"type": "section", "text": {"type": "plain_text", "text": "Schedule was removed"}}]
+ else:
+ blocks = self.get_modal_blocks(schedule_id)
+
+ private_metadata = {}
+ private_metadata["schedule_id"] = schedule_id
+
+ view = {
+ "callback_id": EditScheduleShiftNotifyStep.routing_uid(),
+ "blocks": blocks,
+ "type": "modal",
+ "title": {
+ "type": "plain_text",
+ "text": "Notification preferences",
+ },
+ "private_metadata": json.dumps(private_metadata),
+ }
+
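+        # Note: Slack trigger_ids are short-lived (they expire within a few
+        # seconds), so views.open must be called promptly after the interaction.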
+ self._slack_client.api_call(
+ "views.open",
+ trigger_id=payload["trigger_id"],
+ view=view,
+ )
+
+ def set_selected_value(self, slack_user_identity, payload):
+ action = payload["actions"][0]
+ private_metadata = json.loads(payload["view"]["private_metadata"])
+ schedule_id = private_metadata["schedule_id"]
+ schedule = OnCallSchedule.objects.get(pk=schedule_id)
+ old_state = schedule.repr_settings_for_client_side_logging
+ setattr(schedule, action["block_id"], int(action["selected_option"]["value"]))
+ schedule.save()
+ new_state = schedule.repr_settings_for_client_side_logging
+ description = f"Schedule {schedule.name} was changed from:\n{old_state}\nto:\n{new_state}"
+ create_organization_log(
+ schedule.organization,
+ slack_user_identity.get_user(schedule.organization),
+ OrganizationLogType.TYPE_SCHEDULE_CHANGED,
+ description,
+ )
+
+ def get_modal_blocks(self, schedule_id):
+ blocks = [
+ {
+ "type": "section",
+ "text": {"type": "plain_text", "text": "Notification frequency"},
+ "block_id": "notify_oncall_shift_freq",
+ "accessory": {
+ "type": "static_select",
+ "placeholder": {"type": "plain_text", "text": "----"},
+ "action_id": EditScheduleShiftNotifyStep.routing_uid(),
+ "options": self.get_options("notify_oncall_shift_freq"),
+ "initial_option": self.get_initial_option(schedule_id, "notify_oncall_shift_freq"),
+ },
+ },
+ {
+ "type": "section",
+ "text": {"type": "plain_text", "text": "Current shift notification settings"},
+ "block_id": "mention_oncall_start",
+ "accessory": {
+ "type": "static_select",
+ "placeholder": {"type": "plain_text", "text": "----"},
+ "action_id": EditScheduleShiftNotifyStep.routing_uid(),
+ "options": self.get_options("mention_oncall_start"),
+ "initial_option": self.get_initial_option(schedule_id, "mention_oncall_start"),
+ },
+ },
+ {
+ "type": "section",
+ "text": {"type": "plain_text", "text": "Next shift notification settings"},
+ "block_id": "mention_oncall_next",
+ "accessory": {
+ "type": "static_select",
+ "placeholder": {"type": "plain_text", "text": "----"},
+ "action_id": EditScheduleShiftNotifyStep.routing_uid(),
+ "options": self.get_options("mention_oncall_next"),
+ "initial_option": self.get_initial_option(schedule_id, "mention_oncall_next"),
+ },
+ },
+ {
+ "type": "section",
+ "text": {"type": "plain_text", "text": "Action for slot when no one is on-call"},
+ "block_id": "notify_empty_oncall",
+ "accessory": {
+ "type": "static_select",
+ "placeholder": {"type": "plain_text", "text": "----"},
+ "action_id": EditScheduleShiftNotifyStep.routing_uid(),
+ "options": self.get_options("notify_empty_oncall"),
+ "initial_option": self.get_initial_option(schedule_id, "notify_empty_oncall"),
+ },
+ },
+ ]
+
+ return blocks
+
+ def get_options(self, select_name):
+ select_options = getattr(self, f"{select_name}_options")
+ return [
+ {"text": {"type": "plain_text", "text": select_options[option]}, "value": str(option)}
+ for option in select_options
+ ]
+
+ def get_initial_option(self, schedule_id, select_name):
+
+ schedule = OnCallSchedule.objects.get(pk=schedule_id)
+
+ current_value = getattr(schedule, select_name)
+ text = getattr(self, f"{select_name}_options")[current_value]
+
+ initial_option = {
+ "text": {
+ "type": "plain_text",
+ "text": f"{text}",
+ },
+ "value": str(int(current_value)),
+ }
+
+ return initial_option
+
+ @classmethod
+ def get_report_blocks_ical(cls, new_shifts, next_shifts, schedule, empty):
+ organization = schedule.organization
+ if empty:
+ if schedule.notify_empty_oncall == schedule.NotifyEmptyOnCall.ALL:
+ now_text = "Inviting . No one on-call now!\n"
+ elif schedule.notify_empty_oncall == schedule.NotifyEmptyOnCall.PREV:
+ user_ids = []
+ for item in json.loads(schedule.current_shifts).values():
+ user_ids.extend(item.get("users", []))
+ prev_users = organization.users.filter(id__in=user_ids)
+ users_verbal = " ".join(
+ [f"{user.get_user_verbal_for_team_for_slack(mention=True)}" for user in prev_users]
+ )
+ now_text = f"No one on-call now! Inviting prev shift: {users_verbal}\n"
+ else:
+ now_text = "No one on-call now!\n"
+
+ else:
+ now_text = ""
+ for shift in new_shifts:
+ users = shift["users"]
+ now_text += cls.get_ical_shift_notification_text(shift, schedule.mention_oncall_start, users)
+ now_text = "*New on-call shift:*\n" + now_text
+
+ if len(next_shifts) == 0:
+ if len(new_shifts) == 0:
+ next_text = "No one on-call next hour!"
+ else:
+ next_text = "No one on-call next!"
+ else:
+ next_text = ""
+ for shift in next_shifts:
+ users = shift["users"]
+ next_text += cls.get_ical_shift_notification_text(shift, schedule.mention_oncall_next, users)
+ next_text = "\n*Next on-call shift:*\n" + next_text
+
+ text = f"{now_text}{next_text}"
+ blocks = [
+ {
+ "type": "section",
+ "text": {
+ "type": "mrkdwn",
+ "text": text,
+ "verbatim": True,
+ },
+ },
+ {
+ "type": "actions",
+ "elements": [
+ {
+ "type": "button",
+ "action_id": f"{cls.routing_uid()}",
+ "text": {"type": "plain_text", "text": ":gear:", "emoji": True},
+ "value": f"edit_{schedule.pk}",
+ }
+ ],
+ },
+ {"type": "context", "elements": [{"type": "mrkdwn", "text": f"On-call schedule *{schedule.name}*"}]},
+ ]
+ return blocks
+
+ @classmethod
+ def get_report_blocks_manual(cls, current_shift, next_shift, schedule):
+
+ current_piece, current_user = current_shift
+
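+        # Manual schedules are assumed here to consist of fixed 12-hour shifts;
+        # the current shift's start is reconstructed from its starting hour.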
+ start_day = timezone.datetime.now()
+ current_hour = timezone.datetime.today().hour
+ start_hour = current_piece.starts_at.hour
+ if start_hour > current_hour:
+ start_day -= timezone.timedelta(days=1)
+
+ shift_start = start_day.replace(hour=start_hour, minute=0, second=0, microsecond=0)
+ shift_end = shift_start + timezone.timedelta(hours=12)
+ shift_start_timestamp = int(shift_start.astimezone(pytz.UTC).timestamp())
+ shift_end_timestamp = int(shift_end.astimezone(pytz.UTC).timestamp())
+
+ next_shift_end = shift_end + timezone.timedelta(hours=12)
+ next_shift_end_timestamp = int(next_shift_end.astimezone(pytz.UTC).timestamp())
+
+ now_text = "_*Now*_:\n"
+ if schedule.mention_oncall_start:
+ user_mention = current_user.get_user_verbal_for_team_for_slack(
+ mention=True,
+ )
+
+ else:
+ user_mention = current_user.get_user_verbal_for_team_for_slack(
+ mention=False,
+ )
+ now_text += f"*{user_mention}*"
+
+ now_text += f" from {format_datetime_to_slack(shift_start_timestamp)}"
+ now_text += f" to {format_datetime_to_slack(shift_end_timestamp)}"
+
+ next_piece, next_user = next_shift
+ next_text = "\n_*Next*_:\n"
+ if schedule.mention_oncall_next:
+ user_mention = next_user.get_user_verbal_for_team_for_slack(
+ mention=True,
+ )
+ else:
+ user_mention = next_user.get_user_verbal_for_team_for_slack(
+ mention=False,
+ )
+ next_text += f"*{user_mention}*"
+
+ next_text += f" from {format_datetime_to_slack(shift_end_timestamp)}"
+ next_text += f" to {format_datetime_to_slack(next_shift_end_timestamp)}"
+
+ text = f"{now_text}{next_text}"
+ blocks = [
+ {
+ "type": "section",
+ "text": {
+ "type": "mrkdwn",
+ "text": text,
+ "verbatim": True,
+ },
+ },
+ {
+ "type": "actions",
+ "elements": [
+ {
+ "type": "button",
+ "action_id": f"{cls.routing_uid()}",
+ "text": {"type": "plain_text", "text": ":gear:", "emoji": True},
+ "value": f"edit_{schedule.pk}",
+ }
+ ],
+ },
+ {"type": "context", "elements": [{"type": "mrkdwn", "text": f"On-call schedule *{schedule.name}*"}]},
+ ]
+
+ return blocks
+
+ @classmethod
+ def get_ical_shift_notification_text(cls, shift, mention, users):
+
+ if shift["all_day"]:
+ notification = " ".join([f"{user.get_user_verbal_for_team_for_slack(mention=mention)}" for user in users])
+ user_verbal = shift["users"][0].get_user_verbal_for_team_for_slack(
+ mention=False,
+ )
+ if shift["start"].day == shift["end"].day:
+ all_day_text = shift["start"].strftime("%b %d")
+ else:
+ all_day_text = f'From {shift["start"].strftime("%b %d")} to {shift["end"].strftime("%b %d")}'
+ notification += (
+ f" {all_day_text} _All-day event in *{user_verbal}'s* timezone_ " f'- {shift["users"][0].timezone}.\n'
+ )
+ else:
+ shift_start_timestamp = int(shift["start"].astimezone(pytz.UTC).timestamp())
+ shift_end_timestamp = int(shift["end"].astimezone(pytz.UTC).timestamp())
+
+ notification = (
+ " ".join([f"{user.get_user_verbal_for_team_for_slack(mention=mention)}" for user in users])
+ + f" from {format_datetime_to_slack(shift_start_timestamp)}"
+ f" to {format_datetime_to_slack(shift_end_timestamp)}\n"
+ )
+ priority = shift.get("priority", 0) - shift.get("priority_increased_by", 0)
+ if priority != 0:
+ notification = f"[L{shift.get('priority')}] {notification}"
+ return notification
+
+
+STEPS_ROUTING = [
+ {
+ "payload_type": scenario_step.PAYLOAD_TYPE_BLOCK_ACTIONS,
+ "block_action_type": scenario_step.BLOCK_ACTION_TYPE_BUTTON,
+ "block_action_id": EditScheduleShiftNotifyStep.routing_uid(),
+ "step": EditScheduleShiftNotifyStep,
+ },
+ {
+ "payload_type": scenario_step.PAYLOAD_TYPE_BLOCK_ACTIONS,
+ "block_action_type": scenario_step.BLOCK_ACTION_TYPE_STATIC_SELECT,
+ "block_action_id": EditScheduleShiftNotifyStep.routing_uid(),
+ "step": EditScheduleShiftNotifyStep,
+ },
+]
diff --git a/engine/apps/slack/scenarios/slack_channel.py b/engine/apps/slack/scenarios/slack_channel.py
new file mode 100644
index 0000000000..0893142354
--- /dev/null
+++ b/engine/apps/slack/scenarios/slack_channel.py
@@ -0,0 +1,128 @@
+from contextlib import suppress
+
+from django.apps import apps
+from django.utils import timezone
+
+from apps.slack.scenarios import scenario_step
+
+
+class SlackChannelCreatedOrRenamedEventStep(scenario_step.ScenarioStep):
+ tags = [
+ scenario_step.ScenarioStep.TAG_TRIGGERED_BY_SYSTEM,
+ ]
+
+ # Avoid logging this step to prevent collecting sensitive data of our customers
+ need_to_be_logged = False
+
+ def process_scenario(self, slack_user_identity, slack_team_identity, payload, action=None):
+ """
+ Triggered by action: Create or rename channel
+ """
+ SlackChannel = apps.get_model("slack", "SlackChannel")
+
+ slack_id = payload["event"]["channel"]["id"]
+ channel_name = payload["event"]["channel"]["name"]
+
+ SlackChannel.objects.update_or_create(
+ slack_id=slack_id,
+ slack_team_identity=slack_team_identity,
+ defaults={
+ "name": channel_name,
+ "last_populated": timezone.now().date(),
+ },
+ )
+
+
+class SlackChannelDeletedEventStep(scenario_step.ScenarioStep):
+ tags = [
+ scenario_step.ScenarioStep.TAG_TRIGGERED_BY_SYSTEM,
+ ]
+
+ # Avoid logging this step to prevent collecting sensitive data of our customers
+ need_to_be_logged = False
+
+ def process_scenario(self, slack_user_identity, slack_team_identity, payload, action=None):
+ """
+ Triggered by action: Delete channel
+ """
+ SlackChannel = apps.get_model("slack", "SlackChannel")
+
+ slack_id = payload["event"]["channel"]
+ with suppress(SlackChannel.DoesNotExist):
+ SlackChannel.objects.get(
+ slack_id=slack_id,
+ slack_team_identity=slack_team_identity,
+ ).delete()
+
+
+class SlackChannelArchivedEventStep(scenario_step.ScenarioStep):
+ tags = [
+ scenario_step.ScenarioStep.TAG_TRIGGERED_BY_SYSTEM,
+ ]
+
+ # Avoid logging this step to prevent collecting sensitive data of our customers
+ need_to_be_logged = False
+
+ def process_scenario(self, slack_user_identity, slack_team_identity, payload, action=None):
+ """
+ Triggered by action: Archive channel
+ """
+ SlackChannel = apps.get_model("slack", "SlackChannel")
+
+ slack_id = payload["event"]["channel"]
+
+ SlackChannel.objects.filter(
+ slack_id=slack_id,
+ slack_team_identity=slack_team_identity,
+ ).update(is_archived=True)
+
+
+class SlackChannelUnArchivedEventStep(scenario_step.ScenarioStep):
+ tags = [
+ scenario_step.ScenarioStep.TAG_TRIGGERED_BY_SYSTEM,
+ ]
+
+ # Avoid logging this step to prevent collecting sensitive data of our customers
+ need_to_be_logged = False
+
+ def process_scenario(self, slack_user_identity, slack_team_identity, payload, action=None):
+ """
+        Triggered by action: Unarchive channel
+ """
+ SlackChannel = apps.get_model("slack", "SlackChannel")
+
+ slack_id = payload["event"]["channel"]
+
+ SlackChannel.objects.filter(
+ slack_id=slack_id,
+ slack_team_identity=slack_team_identity,
+ ).update(is_archived=False)
+
+
+STEPS_ROUTING = [
+ {
+ "payload_type": scenario_step.PAYLOAD_TYPE_EVENT_CALLBACK,
+ "event_type": scenario_step.EVENT_TYPE_CHANNEL_RENAMED,
+ "step": SlackChannelCreatedOrRenamedEventStep,
+ },
+ {
+ "payload_type": scenario_step.PAYLOAD_TYPE_EVENT_CALLBACK,
+ "event_type": scenario_step.EVENT_TYPE_CHANNEL_CREATED,
+ "step": SlackChannelCreatedOrRenamedEventStep,
+ },
+ {
+ "payload_type": scenario_step.PAYLOAD_TYPE_EVENT_CALLBACK,
+ "event_type": scenario_step.EVENT_TYPE_CHANNEL_DELETED,
+ "step": SlackChannelDeletedEventStep,
+ },
+ {
+ "payload_type": scenario_step.PAYLOAD_TYPE_EVENT_CALLBACK,
+ "event_type": scenario_step.EVENT_TYPE_CHANNEL_ARCHIVED,
+ "step": SlackChannelArchivedEventStep,
+ },
+ {
+ "payload_type": scenario_step.PAYLOAD_TYPE_EVENT_CALLBACK,
+ "event_type": scenario_step.EVENT_TYPE_CHANNEL_UNARCHIVED,
+ "step": SlackChannelUnArchivedEventStep,
+ },
+]
diff --git a/engine/apps/slack/scenarios/slack_channel_integration.py b/engine/apps/slack/scenarios/slack_channel_integration.py
new file mode 100644
index 0000000000..bae779cd02
--- /dev/null
+++ b/engine/apps/slack/scenarios/slack_channel_integration.py
@@ -0,0 +1,193 @@
+import json
+import logging
+
+from django.apps import apps
+
+from apps.integrations.tasks import create_alert
+from apps.slack.scenarios import scenario_step
+
+logger = logging.getLogger(__name__)
+logger.setLevel(logging.DEBUG)
+
+
+class SlackChannelMessageEventStep(scenario_step.ScenarioStep):
+ tags = [
+ scenario_step.ScenarioStep.TAG_TRIGGERED_BY_SYSTEM,
+ ]
+
+ # Avoid logging this step to prevent collecting sensitive data of our customers
+ need_to_be_logged = False
+
+ def process_scenario(self, slack_user_identity, slack_team_identity, payload, action=None):
+ """
+        Triggered by action: any new message in a channel.
+        Dangerous because it is often triggered by clients' internal systems.
+        May cause a flood of events, so be prepared for useless updates.
+ """
+
+ # If it is a message from thread - save it for resolution note
+ if ("thread_ts" in payload["event"] and "subtype" not in payload["event"]) or (
+ payload["event"].get("subtype") == scenario_step.EVENT_SUBTYPE_MESSAGE_CHANGED
+ and "subtype" not in payload["event"]["message"]
+ and "thread_ts" in payload["event"]["message"]
+ ):
+ self.save_thread_message_for_resolution_note(slack_user_identity, payload)
+ elif (
+ payload["event"].get("subtype") == scenario_step.EVENT_SUBTYPE_MESSAGE_DELETED
+ and "thread_ts" in payload["event"]["previous_message"]
+ ):
+ self.delete_thread_message_from_resolution_note(slack_user_identity, payload)
+ # Otherwise check if it is a message from channel with Slack Channel Integration
+ else:
+ self.create_alert_for_slack_channel_integration_if_needed(payload)
+
+ def save_thread_message_for_resolution_note(self, slack_user_identity, payload):
+ ResolutionNoteSlackMessage = apps.get_model("alerts", "ResolutionNoteSlackMessage")
+ SlackMessage = apps.get_model("slack", "SlackMessage")
+
+ if slack_user_identity is None:
+ logger.warning(
+ f"Empty slack_user_identity in PublicMainMenu step:\n"
+ f"{self.slack_team_identity} {self.slack_team_identity.pk}"
+ )
+ return
+
+ channel = payload["event"]["channel"]
+ thread_ts = payload["event"].get("thread_ts") or payload["event"]["message"]["thread_ts"]
+ # sometimes we get messages with empty text, probably because it's an image or attachment
+ event_text = payload["event"].get("text")
+ event_text = "empty message" if event_text == "" else event_text
+ text = event_text or payload["event"]["message"]["text"]
+
+ if "message" in payload["event"]:
+ message_ts = payload["event"]["message"]["ts"]
+ else:
+ message_ts = payload["event"]["ts"]
+
+ try:
+ slack_message = SlackMessage.objects.get(
+ slack_id=thread_ts,
+ channel_id=channel,
+ _slack_team_identity=self.slack_team_identity,
+ )
+ except SlackMessage.DoesNotExist:
+ return
+
+ alert_group = slack_message.get_alert_group()
+
+ result = self._slack_client.api_call(
+ "chat.getPermalink",
+ channel=channel,
+ message_ts=message_ts,
+ )
+ permalink = None
+ if result["permalink"] is not None:
+ permalink = result["permalink"]
+
+ try:
+ slack_thread_message = ResolutionNoteSlackMessage.objects.get(
+ ts=message_ts,
+ thread_ts=thread_ts,
+ alert_group=alert_group,
+ )
+ if len(text) > 2900:
+ if slack_thread_message.added_to_resolution_note:
+ return self._slack_client.api_call(
+ "chat.postEphemeral",
+ channel=channel,
+ user=slack_user_identity.slack_id,
+ text=":warning: Unable to update the <{}|message> in Resolution Note: the message is too long ({}). "
+ "Max length - 2900 symbols.".format(permalink, len(text)),
+ )
+ else:
+ return
+ slack_thread_message.text = text
+ slack_thread_message.save()
+
+ except ResolutionNoteSlackMessage.DoesNotExist:
+ if len(text) > 2900:
+ return self._slack_client.api_call(
+ "chat.postEphemeral",
+ channel=channel,
+ user=slack_user_identity.slack_id,
+ text=":warning: The <{}|message> will not be displayed in Resolution Note: "
+ "the message is too long ({}). Max length - 2900 symbols.".format(permalink, len(text)),
+ )
+ slack_thread_message = ResolutionNoteSlackMessage(
+ alert_group=alert_group,
+ user=self.user,
+ added_by_user=self.user,
+ text=text,
+ slack_channel_id=channel,
+ thread_ts=thread_ts,
+ ts=message_ts,
+ permalink=permalink,
+ )
+ slack_thread_message.save()
+
+ def delete_thread_message_from_resolution_note(self, slack_user_identity, payload):
+ ResolutionNoteSlackMessage = apps.get_model("alerts", "ResolutionNoteSlackMessage")
+
+ if slack_user_identity is None:
+ logger.warning(
+ f"Empty slack_user_identity in PublicMainMenu step:\n"
+ f"{self.slack_team_identity} {self.slack_team_identity.pk}"
+ )
+ return
+
+ channel_id = payload["event"]["channel"]
+ message_ts = payload["event"]["previous_message"]["ts"]
+ thread_ts = payload["event"]["previous_message"]["thread_ts"]
+ try:
+ slack_thread_message = ResolutionNoteSlackMessage.objects.get(
+ ts=message_ts,
+ thread_ts=thread_ts,
+ slack_channel_id=channel_id,
+ )
+ except ResolutionNoteSlackMessage.DoesNotExist:
+ pass
+ else:
+ alert_group = slack_thread_message.alert_group
+ slack_thread_message.delete()
+ self._update_slack_message(alert_group)
+
+ def create_alert_for_slack_channel_integration_if_needed(self, payload):
+ if "subtype" in payload["event"] and payload["event"]["subtype"] != scenario_step.EVENT_SUBTYPE_FILE_SHARE:
+ return
+ AlertReceiveChannel = apps.get_model("alerts", "AlertReceiveChannel")
+ alert_receive_channels = AlertReceiveChannel.objects.filter(
+ integration_slack_channel_id=payload["event"]["channel"], organization=self.organization
+ ).all()
+ for alert_receive_channel in alert_receive_channels:
+ r = self._slack_client.api_call(
+ "chat.getPermalink",
+ channel=payload["event"]["channel"],
+ message_ts=payload["event"]["ts"],
+ )
+ # insert permalink to payload to have access to it in templaters
+ payload["event"]["amixr_mixin"] = {"permalink": r["permalink"]}
+
+ create_alert.apply_async(
+ [],
+ {
+ "title": "<#{}>".format(payload["event"]["channel"]),
+ "message": "{}\n_New message in <#{}> channel_".format(
+ payload["event"]["text"], payload["event"]["channel"]
+ ),
+ "image_url": None,
+ "link_to_upstream_details": r["permalink"],
+ "alert_receive_channel_pk": alert_receive_channel.pk,
+ "integration_unique_data": json.dumps(payload["event"]),
+ "raw_request_data": payload["event"],
+ },
+ )
+
+
+STEPS_ROUTING = [
+ {
+ "payload_type": scenario_step.PAYLOAD_TYPE_EVENT_CALLBACK,
+ "event_type": scenario_step.EVENT_TYPE_MESSAGE,
+ "message_channel_type": scenario_step.EVENT_TYPE_MESSAGE_CHANNEL,
+ "step": SlackChannelMessageEventStep,
+ }
+]
diff --git a/engine/apps/slack/scenarios/slack_renderer.py b/engine/apps/slack/scenarios/slack_renderer.py
new file mode 100644
index 0000000000..be5f9c6b7a
--- /dev/null
+++ b/engine/apps/slack/scenarios/slack_renderer.py
@@ -0,0 +1,51 @@
+import humanize
+from django.apps import apps
+
+from apps.alerts.incident_log_builder import IncidentLogBuilder
+
+
+class AlertGroupLogSlackRenderer:
+ @staticmethod
+ def render_incident_log_report_for_slack(alert_group):
+ AlertGroupLogRecord = apps.get_model("alerts", "AlertGroupLogRecord")
+ UserNotificationPolicyLogRecord = apps.get_model("base", "UserNotificationPolicyLogRecord")
+
+ log_builder = IncidentLogBuilder(alert_group)
+ all_log_records = log_builder.get_log_records_list()
+
+ attachments = []
+
+ # get rendered logs
+ result = "Alert Group log:\n\n"
+ for log_record in all_log_records: # list of AlertGroupLogRecord and UserNotificationPolicyLogRecord logs
+ if type(log_record) == AlertGroupLogRecord:
+ result += f"{log_record.rendered_incident_log_line(for_slack=True)}\n"
+ elif type(log_record) == UserNotificationPolicyLogRecord:
+ result += f"{log_record.rendered_notification_log_line(for_slack=True)}\n"
+
+ attachments.append(
+ {
+ "text": result,
+ }
+ )
+ result = ""
+
+ # check if escalation or invitation active
+ if not (
+ alert_group.resolved or alert_group.is_archived or alert_group.wiped_at or alert_group.root_alert_group
+ ):
+ escalation_policies_plan = log_builder.get_incident_escalation_plan(for_slack=True)
+ if escalation_policies_plan:
+ result += "\n:arrow_down: :arrow_down: :arrow_down: Plan:\n\n"
+ # humanize time, create plan text
+ for time in sorted(escalation_policies_plan):
+ for plan_line in escalation_policies_plan[time]:
+ result += f"*{humanize.naturaldelta(time)}:* {plan_line}\n"
+
+ if len(result) > 0:
+ attachments.append(
+ {
+ "text": result,
+ }
+ )
+ return attachments
diff --git a/engine/apps/slack/scenarios/slack_usergroup.py b/engine/apps/slack/scenarios/slack_usergroup.py
new file mode 100644
index 0000000000..c978f05db7
--- /dev/null
+++ b/engine/apps/slack/scenarios/slack_usergroup.py
@@ -0,0 +1,88 @@
+from django.apps import apps
+from django.utils import timezone
+
+from apps.slack.scenarios import scenario_step
+
+
+class SlackUserGroupEventStep(scenario_step.ScenarioStep):
+ tags = [
+ scenario_step.ScenarioStep.TAG_TRIGGERED_BY_SYSTEM,
+ ]
+
+ # Avoid logging this step to prevent collecting sensitive data of our customers
+ need_to_be_logged = False
+
+ def process_scenario(self, slack_user_identity, slack_team_identity, payload, action=None):
+ """
+        Triggered by action: creation of a user group, or changes to a user group other than its members.
+ """
+ SlackUserGroup = apps.get_model("slack", "SlackUserGroup")
+
+ slack_id = payload["event"]["subteam"]["id"]
+ usergroup_name = payload["event"]["subteam"]["name"]
+ usergroup_handle = payload["event"]["subteam"]["handle"]
+ members = payload["event"]["subteam"].get("users", [])
+ is_active = payload["event"]["subteam"]["date_delete"] == 0
+
+ SlackUserGroup.objects.update_or_create(
+ slack_id=slack_id,
+ slack_team_identity=slack_team_identity,
+ defaults={
+ "name": usergroup_name,
+ "handle": usergroup_handle,
+ "members": members,
+ "is_active": is_active,
+ "last_populated": timezone.now().date(),
+ },
+ )
+
+
+class SlackUserGroupMembersChangedEventStep(scenario_step.ScenarioStep):
+ tags = [
+ scenario_step.ScenarioStep.TAG_TRIGGERED_BY_SYSTEM,
+ ]
+
+ # Avoid logging this step to prevent collecting sensitive data of our customers
+ need_to_be_logged = False
+
+ def process_scenario(self, slack_user_identity, slack_team_identity, payload, action=None):
+ """
+        Triggered by action: a change of the user group's members.
+ """
+ SlackUserGroup = apps.get_model("slack", "SlackUserGroup")
+
+ slack_id = payload["event"]["subteam_id"]
+ try:
+ user_group = slack_team_identity.usergroups.get(slack_id=slack_id)
+ except SlackUserGroup.DoesNotExist:
+ # If Slack group does not exist, create and populate it
+ SlackUserGroup.update_or_create_slack_usergroup_from_slack(slack_id, slack_team_identity)
+ else:
+ # else update its members from payload
+ members = set(user_group.members)
+ members_added = payload["event"]["added_users"]
+ members_removed = payload["event"]["removed_users"]
+ members.update(members_added)
+ members.difference_update(members_removed)
+
+ user_group.members = list(members)
+ user_group.save(update_fields=["members"])
+
+
+STEPS_ROUTING = [
+ {
+ "payload_type": scenario_step.PAYLOAD_TYPE_EVENT_CALLBACK,
+ "event_type": scenario_step.EVENT_TYPE_SUBTEAM_CREATED,
+ "step": SlackUserGroupEventStep,
+ },
+ {
+ "payload_type": scenario_step.PAYLOAD_TYPE_EVENT_CALLBACK,
+ "event_type": scenario_step.EVENT_TYPE_SUBTEAM_UPDATED,
+ "step": SlackUserGroupEventStep,
+ },
+ {
+ "payload_type": scenario_step.PAYLOAD_TYPE_EVENT_CALLBACK,
+ "event_type": scenario_step.EVENT_TYPE_SUBTEAM_MEMBERS_CHANGED,
+ "step": SlackUserGroupMembersChangedEventStep,
+ },
+]
diff --git a/engine/apps/slack/scenarios/step_mixins.py b/engine/apps/slack/scenarios/step_mixins.py
new file mode 100644
index 0000000000..ba6217cd90
--- /dev/null
+++ b/engine/apps/slack/scenarios/step_mixins.py
@@ -0,0 +1,70 @@
+import logging
+from abc import ABC, abstractmethod
+
+logger = logging.getLogger(__name__)
+
+
+class AccessControl(ABC):
+ ALLOWED_ROLES = []
+ ACTION_VERBOSE = ""
+
+ def dispatch(self, slack_user_identity, slack_team_identity, payload, action=None):
+ if self.check_membership():
+            return super().dispatch(slack_user_identity, slack_team_identity, payload, action=action)
+ else:
+ self.send_denied_message(payload)
+
+ def check_membership(self):
+ return self.user.role in self.ALLOWED_ROLES
+
+ @abstractmethod
+ def send_denied_message(self, payload):
+ pass
+
+
+class IncidentActionsAccessControlMixin(AccessControl):
+ """
+ Mixin for auth in incident actions
+ """
+
+ def send_denied_message_to_channel(self, payload=None):
+ # Send denied message to thread by default
+ return False
+
+ def send_denied_message(self, payload):
+ try:
+ thread_ts = payload["message_ts"]
+ except KeyError:
+ thread_ts = payload["message"]["ts"]
+ self._slack_client.api_call(
+ "chat.postMessage",
+ channel=payload["channel"]["id"],
+ attachments=[
+ {
+ "callback_id": "alert",
+ "text": "Attempted to {} by {}, but failed due to a lack of permissions.".format(
+ self.ACTION_VERBOSE,
+ self.user.get_user_verbal_for_team_for_slack(),
+ ),
+ },
+ ],
+ thread_ts=None if self.send_denied_message_to_channel(payload) else thread_ts,
+ unfurl_links=True,
+ )
+
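+# Illustrative (hypothetical) subclass, sketching how the mixin is combined with
+# a scenario step; the class and role names below are placeholders:
+#
+#     class ResolveGroupStep(IncidentActionsAccessControlMixin, ScenarioStep):
+#         ALLOWED_ROLES = [Role.ADMIN, Role.EDITOR]
+#         ACTION_VERBOSE = "resolve incident"
+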
+
+class CheckAlertIsUnarchivedMixin(object):
+
+ ALLOWED_ROLES = []
+
+ ACTION_VERBOSE = ""
+
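+    # Returns True when the alert group started after the organization's archive
+    # cutoff date; otherwise it optionally opens a warning window and, as a side
+    # effect, resolves the archived group by archivation.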
+ def check_alert_is_unarchived(self, slack_team_identity, payload, alert_group, warning=True):
+ alert_group_is_unarchived = alert_group.started_at.date() > self.organization.archive_alerts_from
+ if not alert_group_is_unarchived:
+ if warning:
+ warning_text = "Action is impossible: the Alert is archived."
+ self.open_warning_window(payload, warning_text)
+ if not alert_group.resolved or not alert_group.is_archived:
+ alert_group.resolve_by_archivation()
+ return alert_group_is_unarchived
diff --git a/engine/apps/slack/slack_client/__init__.py b/engine/apps/slack/slack_client/__init__.py
new file mode 100644
index 0000000000..528f1aa01d
--- /dev/null
+++ b/engine/apps/slack/slack_client/__init__.py
@@ -0,0 +1 @@
+from .slack_client import SlackClientWithErrorHandling # noqa: F401
diff --git a/engine/apps/slack/slack_client/exceptions.py b/engine/apps/slack/slack_client/exceptions.py
new file mode 100644
index 0000000000..d2c032b12b
--- /dev/null
+++ b/engine/apps/slack/slack_client/exceptions.py
@@ -0,0 +1,22 @@
+class SlackAPIException(Exception):
+ def __init__(self, *args, **kwargs):
+ self.response = {}
+ if "response" in kwargs:
+ self.response = kwargs["response"]
+ super().__init__(*args)
+
+
+class SlackAPITokenException(SlackAPIException):
+ pass
+
+
+class SlackAPIChannelArchivedException(SlackAPIException):
+ pass
+
+
+class SlackAPIRateLimitException(SlackAPIException):
+ pass
+
+
+class SlackClientException(Exception):
+ pass
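+
+
+# Illustrative only: these exceptions carry the raw Slack response, so callers
+# can branch on the error code, e.g.
+#
+#     try:
+#         client.api_call("chat.postMessage", channel=channel_id, text=text)
+#     except SlackAPIException as e:
+#         if e.response.get("error") == "channel_not_found":
+#             pass  # channel was deleted; nothing to do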
diff --git a/engine/apps/slack/slack_client/slack_client.py b/engine/apps/slack/slack_client/slack_client.py
new file mode 100644
index 0000000000..8d0c47364e
--- /dev/null
+++ b/engine/apps/slack/slack_client/slack_client.py
@@ -0,0 +1,109 @@
+import logging
+
+from django.apps import apps
+from django.utils import timezone
+from slackclient import SlackClient
+from slackclient.exceptions import TokenRefreshError
+
+from apps.slack.constants import SLACK_RATE_LIMIT_DELAY
+
+from .exceptions import (
+ SlackAPIChannelArchivedException,
+ SlackAPIException,
+ SlackAPIRateLimitException,
+ SlackAPITokenException,
+ SlackClientException,
+)
+from .slack_client_server import SlackClientServer
+
+logger = logging.getLogger(__name__)
+
+
+class SlackClientWithErrorHandling(SlackClient):
+ def __init__(self, token=None, **kwargs):
+ """
+        This method is overridden so that SlackClient uses our custom SlackClientServer.
+ """
+ super().__init__(token=token, **kwargs)
+
+ proxies = kwargs.get("proxies")
+
+ if self.refresh_token:
+ if callable(self.token_update_callback):
+ token = None
+ else:
+ raise TokenRefreshError("Token refresh callback function is required when using refresh token.")
+ # Slack app configs
+ self.server = SlackClientServer(token=token, connect=False, proxies=proxies)
+
+ def paginated_api_call(self, *args, **kwargs):
+ # It's a key from response which is paginated. For example "users" or "channels"
+ listed_key = kwargs["paginated_key"]
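+        # Illustrative usage: "conversations.list" pages its results under the
+        # "channels" key, so a caller could do
+        #     client.paginated_api_call("conversations.list", paginated_key="channels")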
+
+ response = self.api_call(*args, **kwargs)
+ cumulative_response = response
+
+ while (
+ "response_metadata" in response
+ and "next_cursor" in response["response_metadata"]
+ and response["response_metadata"]["next_cursor"] != ""
+ ):
+ kwargs["cursor"] = response["response_metadata"]["next_cursor"]
+ response = self.api_call(*args, **kwargs)
+ cumulative_response[listed_key] += response[listed_key]
+
+ return cumulative_response
+
+ def api_call(self, *args, **kwargs):
+ DynamicSetting = apps.get_model("base", "DynamicSetting")
+
+ simulate_slack_downtime = DynamicSetting.objects.get_or_create(
+ name="simulate_slack_downtime", defaults={"boolean_value": False}
+ )[0]
+
+ if simulate_slack_downtime.boolean_value:
+ # When slack is down it returns 503 with no response.text which leads to JSONDecodeError.
+ # We handle it in SlackClientServer and raise SlackClientException instead
+ raise SlackClientException("Slack Downtime Simulation")
+
+ response = super(SlackClientWithErrorHandling, self).api_call(*args, **kwargs)
+
+ if not response["ok"]:
+
+ exception_text = "Slack API Call Error: {} \nArgs: {} \nKwargs: {} \nResponse: {}".format(
+ response["error"], args, kwargs, response
+ )
+
+ if response["error"] == "is_archived":
+ raise SlackAPIChannelArchivedException(exception_text, response=response)
+
+ if response["error"] == "rate_limited" or response["error"] == "ratelimited":
+ if "headers" in response and response["headers"].get("Retry-After") is not None:
+ delay = int(response["headers"]["Retry-After"])
+ else:
+ delay = SLACK_RATE_LIMIT_DELAY
+ response["rate_limit_delay"] = delay
+ raise SlackAPIRateLimitException(exception_text, response=response)
+
+ if response["error"] == "code_already_used":
+ return response
+
+            # Detect revoked tokens and deactivated accounts
+ if response["error"] == "account_inactive" or response["error"] == "token_revoked":
+ if "team" in kwargs:
+ team_identity = kwargs["team"]
+ del kwargs["team"]
+ team_identity.detected_token_revoked = timezone.now()
+ team_identity.is_profile_populated = False
+ team_identity.save(update_fields=["detected_token_revoked", "is_profile_populated"])
+ raise SlackAPITokenException(exception_text, response=response)
+ else:
+ if "team" in kwargs:
+ slack_team_identity = kwargs["team"]
+ if slack_team_identity.detected_token_revoked:
+ slack_team_identity.detected_token_revoked = None
+ slack_team_identity.save(update_fields=["detected_token_revoked"])
+
+ raise SlackAPIException(exception_text, response=response)
+
+ return response
diff --git a/engine/apps/slack/slack_client/slack_client_server.py b/engine/apps/slack/slack_client/slack_client_server.py
new file mode 100644
index 0000000000..235e084fe6
--- /dev/null
+++ b/engine/apps/slack/slack_client/slack_client_server.py
@@ -0,0 +1,26 @@
+import json
+
+from slackclient.server import Server
+
+from .exceptions import SlackClientException
+
+
+class SlackClientServer(Server):
+ def api_call(self, token, request="?", timeout=None, **kwargs):
+ """
+        This method is overridden to handle JSONDecodeError and add more information about the response.
+ """
+ response = self.api_requester.do(token, request, kwargs, timeout=timeout)
+ response_json = {"headers": dict(response.headers)}
+ resp_text = response.text
+ try:
+ response_json.update(json.loads(resp_text))
+ except json.JSONDecodeError:
+ response_json["response_text"] = resp_text
+ exception_text = (
+ f"Slack API Call Error: unexpected response from Slack \n"
+ f"Status: {response.status_code}\nArgs: ('{request}',) \nKwargs: {kwargs} \n"
+ f"Response: {response_json}"
+ )
+ raise SlackClientException(exception_text)
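+        # SlackClient.api_call() json-decodes whatever the server returns, so
+        # return a JSON string with the headers merged in rather than a dict.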
+ return json.dumps(response_json)
diff --git a/engine/apps/slack/slack_formatter.py b/engine/apps/slack/slack_formatter.py
new file mode 100644
index 0000000000..488d77f781
--- /dev/null
+++ b/engine/apps/slack/slack_formatter.py
@@ -0,0 +1,99 @@
+import re
+
+import emoji
+from django.apps import apps
+from slackviewer.formatter import SlackFormatter
+
+
+class SlackFormatter(SlackFormatter):
+ _LINK_PAT = re.compile(r"<(https|http|mailto):[A-Za-z0-9_\.\-\/\?\,\=\#\:\@\& ]+\|[^>]+>")
+
+ def __init__(self, organization):
+ self.__ORGANIZATION = organization
+ self.channel_mention_format = "#{}"
+ self.user_mention_format = "@{}"
+        self.hyperlink_mention_format = '<a href="{url}">{title}</a> '
+
+ def find_user(self, message):
+ raise NotImplementedError()
+
+ def format(self, message):
+ """
+        Overrides the original render_text method.
+        It is now responsible only for formatting slack mentions, channel names, etc.
+ """
+ if message is None:
+ return
+ message = message.replace("", "@channel")
+ message = message.replace("", "@channel")
+ message = message.replace("", "@here")
+ message = message.replace("", "@here")
+ message = message.replace("", "@everyone")
+ message = message.replace("", "@everyone")
+ message = self._slack_to_accepted_emoji(message)
+
+ # Handle mentions of users, channels and bots (e.g "<@U0BM1CGQY|calvinchanubc> has joined the channel")
+ message = self._MENTION_PAT.sub(self._sub_annotated_mention, message)
+ # Handle links
+ message = self._LINK_PAT.sub(self._sub_hyperlink, message)
+ # Introduce unicode emoji
+ message = emoji.emojize(message, use_aliases=True)
+
+ return message
+
+ def _sub_hyperlink(self, matchobj):
+ compound = matchobj.group(0)[1:-1]
+ if len(compound.split("|")) == 2:
+ url, title = compound.split("|")
+ else:
+ url, title = compound, compound
+ result = self.hyperlink_mention_format.format(url=url, title=title)
+ return result
+
+ def _sub_annotated_mention(self, matchobj):
+ """
+        Overridden method that uses a db search instead of self.__USER_DATA and self.__CHANNEL_DATA (see the
+        original method) to look up channels and users by their slack_ids.
+ """
+        # Matchobj has format <@slack_id|annotation> or <#slack_id|annotation>; the annotation may be absent
+ ref_id = matchobj.group(1)[1:] # drop #/@ from the start, we don't care.
+ annotation = matchobj.group(2)
+ # check if mention channel
+ if ref_id.startswith("C"):
+ mention_format = self.channel_mention_format
+ # channel could be mentioned only with its slack_id
+ if not annotation:
+ # search channel_name by slack_id in cache
+ annotation = self._sub_annotated_mention_slack_channel(ref_id)
+ else: # Same for user
+ mention_format = self.user_mention_format
+ if not annotation:
+ annotation = self._sub_annotated_mention_slack_user(ref_id)
+ return mention_format.format(annotation)
+
+ def _sub_annotated_mention_slack_channel(self, ref_id):
+ channel = None
+ slack_team_identity = self.__ORGANIZATION.slack_team_identity
+ if slack_team_identity is not None:
+ cached_channels = slack_team_identity.get_cached_channels(slack_id=ref_id)
+ if len(cached_channels) > 0:
+ channel = cached_channels[0].name
+ annotation = channel if channel else ref_id
+ else:
+ annotation = ref_id
+ return annotation
+
+ def _sub_annotated_mention_slack_user(self, ref_id):
+ SlackUserIdentity = apps.get_model("slack", "SlackUserIdentity")
+
+ slack_user_identity = SlackUserIdentity.objects.filter(
+ slack_team_identity=self.__ORGANIZATION.slack_team_identity, slack_id=ref_id
+ ).first()
+
+ annotation = ref_id
+ if slack_user_identity is not None:
+ if slack_user_identity.profile_display_name:
+ annotation = slack_user_identity.profile_display_name
+ elif slack_user_identity.slack_verbal:
+ annotation = slack_user_identity.slack_verbal
+ return annotation
diff --git a/engine/apps/slack/tasks.py b/engine/apps/slack/tasks.py
new file mode 100644
index 0000000000..48c688bec8
--- /dev/null
+++ b/engine/apps/slack/tasks.py
@@ -0,0 +1,781 @@
+import logging
+import random
+import time
+
+from celery.utils.log import get_task_logger
+from django.apps import apps
+from django.conf import settings
+from django.core.cache import cache
+from django.utils import timezone
+
+from apps.alerts.tasks.compare_escalations import compare_escalations
+from apps.public_api import constants as public_constants
+from apps.public_api.constants import DEMO_SLACK_USER_GROUP_ID
+from apps.slack.constants import CACHE_UPDATE_INCIDENT_SLACK_MESSAGE_LIFETIME, SLACK_BOT_ID
+from apps.slack.scenarios.escalation_delivery import EscalationDeliveryStep
+from apps.slack.scenarios.scenario_step import ScenarioStep
+from apps.slack.slack_client import SlackClientWithErrorHandling
+from apps.slack.slack_client.exceptions import SlackAPIException, SlackAPITokenException
+from apps.slack.utils import get_cache_key_update_incident_slack_message, post_message_to_channel
+from common.custom_celery_tasks import shared_dedicated_queue_retry_task
+from common.utils import batch_queryset
+
+logger = get_task_logger(__name__)
+logger.setLevel(logging.DEBUG)
+
+
+@shared_dedicated_queue_retry_task(autoretry_for=(Exception,), retry_backoff=True)
+def update_incident_slack_message(slack_team_identity_pk, alert_group_pk):
+ cache_key = get_cache_key_update_incident_slack_message(alert_group_pk)
+ cached_task_id = cache.get(cache_key)
+ current_task_id = update_incident_slack_message.request.id
+
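+    # Debounce: the first call schedules a delayed copy of itself, caches that
+    # task id and exits; only the copy whose id matches the cached one performs
+    # the update, so bursts of triggers collapse into a single message update.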
+ if cached_task_id is None:
+ update_task_id = update_incident_slack_message.apply_async(
+ (slack_team_identity_pk, alert_group_pk),
+ countdown=10,
+ )
+ cache.set(cache_key, update_task_id, timeout=CACHE_UPDATE_INCIDENT_SLACK_MESSAGE_LIFETIME)
+
+        return (
+            f"update_incident_slack_message rescheduled because no task_id is cached"
+            f" for alert_group {alert_group_pk} (current task_id: {current_task_id})"
+        )
+    if current_task_id != cached_task_id:
+        return (
+            f"update_incident_slack_message skipped because current task_id ({current_task_id})"
+            f" doesn't match cached task_id ({cached_task_id}) for alert_group {alert_group_pk}"
+        )
+
+ SlackTeamIdentity = apps.get_model("slack", "SlackTeamIdentity")
+ AlertGroup = apps.get_model("alerts", "AlertGroup")
+
+ slack_team_identity = SlackTeamIdentity.objects.get(pk=slack_team_identity_pk)
+ alert_group = AlertGroup.all_objects.get(pk=alert_group_pk)
+
+ if alert_group.skip_escalation_in_slack or alert_group.channel.is_rate_limited_in_slack:
+ return "Skip message update in Slack due to rate limit"
+ if alert_group.slack_message is None:
+ return "Skip message update in Slack due to absence of slack message"
+ ScenarioStep(slack_team_identity, alert_group.channel.organization)._update_slack_message(alert_group)
+
+
+@shared_dedicated_queue_retry_task(autoretry_for=(Exception,), retry_backoff=True)
+def check_slack_message_exists_before_post_message_to_thread(
+ alert_group_pk,
+ text,
+ escalation_policy_pk=None,
+ escalation_policy_step=None,
+ step_specific_info=None,
+):
+ """
+    Check that a slack message exists for the current alert group before posting a message to its thread in slack.
+    If it does not exist yet, restart the task every 10 seconds for up to 24 hours.
+ """
+ AlertGroup = apps.get_model("alerts", "AlertGroup")
+ AlertGroupLogRecord = apps.get_model("alerts", "AlertGroupLogRecord")
+ EscalationPolicy = apps.get_model("alerts", "EscalationPolicy")
+
+ alert_group = AlertGroup.all_objects.get(pk=alert_group_pk)
+ slack_team_identity = alert_group.channel.organization.slack_team_identity
+ # get escalation policy object if it exists to save it in log record
+ escalation_policy = EscalationPolicy.objects.filter(pk=escalation_policy_pk).first()
+
+ # we cannot post message to thread if team does not have slack team identity
+ if not slack_team_identity:
+ AlertGroupLogRecord(
+ type=AlertGroupLogRecord.TYPE_ESCALATION_FAILED,
+ alert_group=alert_group,
+ escalation_policy=escalation_policy,
+ escalation_error_code=AlertGroupLogRecord.ERROR_ESCALATION_NOTIFY_IN_SLACK,
+ escalation_policy_step=escalation_policy_step,
+ step_specific_info=step_specific_info,
+ ).save()
+ logger.debug(
+ f"Failed to post message to thread in Slack for alert_group {alert_group_pk} because "
+ f"slack team identity doesn't exist"
+ )
+ return
+ retry_timeout_hours = 24
+ slack_message = alert_group.get_slack_message()
+
+ if slack_message is not None:
+ EscalationDeliveryStep(slack_team_identity, alert_group.channel.organization).notify_thread_about_action(
+ alert_group, text
+ )
+ # check how much time has passed since alert group was created
+    # to prevent an eternal loop of restarting check_slack_message_exists_before_post_message_to_thread
+ elif timezone.now() < alert_group.started_at + timezone.timedelta(hours=retry_timeout_hours):
+ logger.debug(
+ f"check_slack_message_exists_before_post_message_to_thread for alert_group {alert_group.pk} failed "
+ f"because slack message does not exist. Restarting check_slack_message_before_post_message_to_thread."
+ )
+ restart_delay_seconds = 10
+ check_slack_message_exists_before_post_message_to_thread.apply_async(
+ (
+ alert_group_pk,
+ text,
+ escalation_policy_pk,
+ escalation_policy_step,
+ step_specific_info,
+ ),
+ countdown=restart_delay_seconds,
+ )
+ else:
+ logger.debug(
+ f"check_slack_message_exists_before_post_message_to_thread for alert_group {alert_group.pk} failed "
+ f"because slack message after {retry_timeout_hours} hours still does not exist"
+ )
+ # create log if it was triggered by escalation step
+ if escalation_policy_step:
+ AlertGroupLogRecord(
+ type=AlertGroupLogRecord.TYPE_ESCALATION_FAILED,
+ alert_group=alert_group,
+ escalation_policy=escalation_policy,
+ escalation_error_code=AlertGroupLogRecord.ERROR_ESCALATION_NOTIFY_IN_SLACK,
+ escalation_policy_step=escalation_policy_step,
+ step_specific_info=step_specific_info,
+ ).save()
+
+
+@shared_dedicated_queue_retry_task(autoretry_for=(Exception,), retry_backoff=True)
+def resolve_archived_incidents_for_organization(organization_id):
+ Organization = apps.get_model("user_management", "Organization")
+ AlertGroup = apps.get_model("alerts", "AlertGroup")
+
+ organization = Organization.objects.get(pk=organization_id)
+
+ alert_groups_queryset = AlertGroup.unarchived_objects.filter(
+ channel__organization=organization,
+ started_at__date__lte=organization.archive_alerts_from,
+ resolved=False,
+ )
+
+ for alert_group in alert_groups_queryset:
+ try:
+ alert_group.resolve_by_archivation()
+ except SlackAPIException as e:
+ if e.response["error"] == "channel_not_found": # Todo: investigate and remove this hack
+ print(e)
+ elif e.response["error"] == "rate_limited" or e.response["error"] == "ratelimited":
+ if "headers" in e.response and e.response["headers"].get("Retry-After") is not None:
+ delay = int(e.response["headers"]["Retry-After"])
+ else:
+ delay = random.randint(1, 10)
+ resolve_archived_incidents_for_organization.apply_async((organization_id,), countdown=delay)
+ else:
+ raise e
+
+
+@shared_dedicated_queue_retry_task(autoretry_for=(Exception,), retry_backoff=True)
+def unarchive_incidents_for_organization(organization_id):
+ Organization = apps.get_model("user_management", "Organization")
+ AlertGroup = apps.get_model("alerts", "AlertGroup")
+ SlackMessage = apps.get_model("slack", "SlackMessage")
+
+ organization = Organization.objects.get(pk=organization_id)
+
+ alert_groups_queryset = AlertGroup.all_objects.filter(
+ channel__organization=organization,
+ started_at__date__gt=organization.archive_alerts_from,
+ is_archived=True,
+ )
+ # convert qs to list to prevent it from changing after qs update
+ alert_groups_with_slack_message = list(
+ alert_groups_queryset.select_related("slack_message").filter(slack_message__isnull=False)
+ )
+
+ alert_groups_queryset.update(is_archived=False)
+ slack_team_identity = organization.slack_team_identity
+ if slack_team_identity is not None:
+ sc = SlackClientWithErrorHandling(slack_team_identity.bot_access_token)
+ slack_messages_to_create = []
+
+ for alert_group_with_slack_message in alert_groups_with_slack_message:
+ try:
+ result = sc.api_call(
+ "chat.postMessage",
+ channel=alert_group_with_slack_message.slack_message.channel_id,
+ thread_ts=alert_group_with_slack_message.slack_message.slack_id,
+ text="Incident has been unarchived",
+ )
+ except SlackAPIException as e:
+ if e.response["error"] == "channel_not_found":
+ print(e)
+ elif e.response["error"] == "rate_limited" or e.response["error"] == "ratelimited":
+ if "headers" in e.response and e.response["headers"].get("Retry-After") is not None:
+ delay = int(e.response["headers"]["Retry-After"])
+ else:
+ delay = random.randint(1, 10)
+ time.sleep(delay)
+ else:
+ raise e
+ else:
+ slack_message = SlackMessage(
+ slack_id=result["ts"],
+ organization=organization,
+ _slack_team_identity=slack_team_identity,
+ channel_id=alert_group_with_slack_message.slack_message.channel_id,
+ alert_group_id=alert_group_with_slack_message.pk,
+ )
+ slack_messages_to_create.append(slack_message)
+
+ SlackMessage.objects.bulk_create(slack_messages_to_create, batch_size=5000)
+
+
+@shared_dedicated_queue_retry_task(autoretry_for=(Exception,), retry_backoff=True, max_retries=1)
+def send_message_to_thread_if_bot_not_in_channel(alert_group_pk, slack_team_identity_pk, channel_id):
+ """
+ Send message to alert group's thread if bot is not in current channel
+ """
+
+ AlertGroup = apps.get_model("alerts", "AlertGroup")
+ SlackTeamIdentity = apps.get_model("slack", "SlackTeamIdentity")
+
+ slack_team_identity = SlackTeamIdentity.objects.get(pk=slack_team_identity_pk)
+ alert_group = AlertGroup.all_objects.get(pk=alert_group_pk)
+
+ sc = SlackClientWithErrorHandling(slack_team_identity.bot_access_token)
+
+ bot_user_id = slack_team_identity.bot_user_id
+ members = slack_team_identity.get_conversation_members(sc, channel_id)
+ if bot_user_id not in members:
+ text = f"Please invite <@{bot_user_id}> to this channel to make all features " f"available :wink:"
+ attachments = [
+ {
+ "text": text,
+ }
+ ]
+        ScenarioStep(slack_team_identity, alert_group.channel.organization)._publish_message_to_thread(alert_group, attachments)
+
+
+@shared_dedicated_queue_retry_task(autoretry_for=(Exception,), retry_backoff=True, max_retries=1)
+def send_debug_message_to_thread(alert_group_pk, slack_team_identity_pk):
+ AlertGroup = apps.get_model("alerts", "AlertGroup")
+ SlackTeamIdentity = apps.get_model("slack", "SlackTeamIdentity")
+ SlackMessage = apps.get_model("slack", "SlackMessage")
+
+ slack_team_identity = SlackTeamIdentity.objects.get(pk=slack_team_identity_pk)
+ current_alert_group = AlertGroup.all_objects.get(pk=alert_group_pk)
+ try:
+ channel_id = current_alert_group.slack_message.channel_id
+ except AttributeError:
+ print("SlackMessage object doesn't exist for the alert group")
+ return None
+
+ blocks = []
+ text = "Escalations are silenced due to Debug mode"
+ blocks.append({"type": "section", "text": {"type": "mrkdwn", "text": text}})
+ sc = SlackClientWithErrorHandling(slack_team_identity.bot_access_token)
+
+ result = sc.api_call(
+ "chat.postMessage",
+ channel=channel_id,
+ attachments=[],
+ thread_ts=current_alert_group.slack_message.slack_id,
+ mrkdwn=True,
+ blocks=blocks,
+ )
+ SlackMessage(
+ slack_id=result["ts"],
+ organization=current_alert_group.channel.organization,
+ _slack_team_identity=slack_team_identity,
+ channel_id=channel_id,
+ alert_group=current_alert_group,
+ ).save()
+
+
+@shared_dedicated_queue_retry_task(autoretry_for=(Exception,), retry_backoff=True, max_retries=0)
+def unpopulate_slack_user_identities(organization_pk, force=False, ts=None):
+ User = apps.get_model("user_management", "User")
+ Organization = apps.get_model("user_management", "Organization")
+
+ organization = Organization.objects.get(pk=organization_pk)
+
+ users_to_update = []
+ for user in organization.users.filter(slack_user_identity__isnull=False):
+ user.slack_user_identity = None
+ users_to_update.append(user)
+
+ User.objects.bulk_update(users_to_update, ["slack_user_identity"], batch_size=5000)
+
+ if force:
+ organization.slack_team_identity = None
+ organization.general_log_channel_id = None
+ organization.save()
+
+
+@shared_dedicated_queue_retry_task(autoretry_for=(Exception,), retry_backoff=True, max_retries=0)
+def populate_slack_user_identities(organization_pk):
+
+ SlackUserIdentity = apps.get_model("slack", "SlackUserIdentity")
+
+ Organization = apps.get_model("user_management", "Organization")
+
+ organization = Organization.objects.get(pk=organization_pk)
+ unpopulate_slack_user_identities(organization_pk)
+ slack_team_identity = organization.slack_team_identity
+
+ slack_user_identity_installed = slack_team_identity.installed_by
+ slack_user_identities_to_update = []
+
+ for member in slack_team_identity.members:
+
+ profile = member.get("profile")
+ email = profile.get("email", None)
+
+ # Don't collect bots, invited users and users from other workspaces
+ if (
+ member.get("id", None) == SLACK_BOT_ID
+ or member.get("is_bot", False)
+ or not email
+ or member.get("is_invited_user", False)
+ or member.get("is_restricted")
+ or member.get("is_ultra_restricted")
+ ):
+ continue
+
+ # For user which installs bot
+ if member.get("id", None) == slack_user_identity_installed.slack_id:
+ slack_user_identity = slack_user_identity_installed
+ else:
+ try:
+                slack_user_identity = slack_team_identity.slack_user_identities.get(
+ slack_id=member["id"],
+ )
+ except SlackUserIdentity.DoesNotExist:
+ continue
+
+ slack_user_identity.cached_slack_login = member.get("name", None)
+ slack_user_identity.cached_name = member.get("real_name") or profile.get("real_name", None)
+ slack_user_identity.cached_slack_email = profile.get("email", "")
+
+ slack_user_identity.profile_real_name = profile.get("real_name", None)
+ slack_user_identity.profile_real_name_normalized = profile.get("real_name_normalized", None)
+ slack_user_identity.profile_display_name = profile.get("display_name", None)
+ slack_user_identity.profile_display_name_normalized = profile.get("display_name_normalized", None)
+ slack_user_identity.cached_avatar = profile.get("image_512", None)
+ slack_user_identity.cached_timezone = member.get("tz", None)
+
+ slack_user_identity.deleted = member.get("deleted", None)
+ slack_user_identity.is_admin = member.get("is_admin", None)
+ slack_user_identity.is_owner = member.get("is_owner", None)
+ slack_user_identity.is_primary_owner = member.get("is_primary_owner", None)
+ slack_user_identity.is_restricted = member.get("is_restricted", None)
+ slack_user_identity.is_ultra_restricted = member.get("is_ultra_restricted", None)
+ slack_user_identity.cached_is_bot = member.get("is_bot", None)  # this field already existed
+ slack_user_identity.is_app_user = member.get("is_app_user", None)
+
+ slack_user_identities_to_update.append(slack_user_identity)
+
+ fields_to_update = [
+ "cached_slack_login",
+ "cached_name",
+ "cached_slack_email",
+ "profile_real_name",
+ "profile_real_name_normalized",
+ "profile_display_name",
+ "profile_display_name_normalized",
+ "cached_avatar",
+ "cached_timezone",
+ "deleted",
+ "is_admin",
+ "is_owner",
+ "is_primary_owner",
+ "is_restricted",
+ "is_ultra_restricted",
+ "cached_is_bot",
+ "is_app_user",
+ ]
+ SlackUserIdentity.objects.bulk_update(slack_user_identities_to_update, fields_to_update, batch_size=5000)
+
+
+@shared_dedicated_queue_retry_task()
+def refresh_slack_user_identity_emails():
+ SlackUserIdentity = apps.get_model("slack", "SlackUserIdentity")
+
+ qs = (
+ SlackUserIdentity.all_objects.filter(cached_slack_email="")
+ .exclude(deleted=True)
+ .exclude(cached_is_bot=True)
+ .exclude(
+ cached_name="user_not_found",
+ )
+ .exclude(slack_team_identity__cached_name="no_enough_permissions_to_retrieve")
+ .exclude(slack_team_identity__detected_token_revoked__isnull=False)
+ )
+
+ total = qs.count()
+ for index, slack_user_identity in enumerate(qs, start=1):
+ try:
+ sc = SlackClientWithErrorHandling(slack_user_identity.slack_team_identity.bot_access_token)
+ result = sc.api_call("users.info", user=slack_user_identity.slack_id)
+
+ if "email" in result.get("user").get("profile", None):
+ slack_user_identity.cached_slack_email = result["user"]["profile"]["email"]
+ slack_user_identity.save(update_fields=["cached_slack_email"])
+ logger.info(f"({index}/{total}). Email is found")
+ elif result.get("user").get("is_bot") is True or result.get("user").get("id") == SLACK_BOT_ID:
+ slack_user_identity.cached_is_bot = True
+ slack_user_identity.save(update_fields=["cached_is_bot"])
+ logger.info(f"({index}/{total}). Bot is found")
+ elif result.get("user").get("deleted") is True:
+ slack_user_identity.deleted = True
+ slack_user_identity.save(update_fields=["deleted"])
+ logger.info(f"({index}/{total}). Deleted is found")
+ elif result.get("user").get("is_stranger", False):
+ # case: strangers or external members,
+ # see https://api.slack.com/enterprise/shared-channels
+ slack_user_identity.is_stranger = True
+ slack_user_identity.save(update_fields=["is_stranger"])
+ logger.info(f"({index}/{total}). Stranger or external user detected.")
+ else:
+ logger.error(
+ f"({index}/{total}). Error!!! Email definition error for SlackUserIdentity pk: "
+ f"{slack_user_identity.pk}. It will be generated unknown_email."
+ )
+ except SlackAPIException as e:
+ # case: user_not_found
+ if e.response["error"] == "user_not_found":
+ slack_user_identity.is_not_found = True
+ slack_user_identity.save(update_fields=["is_not_found"])
+ logger.info(f"({index}/{total}). User_not_found detected.")
+ else:
+ logger.error(f"({index}/{total}). Error!!! Exception: {e}")
+
+
+@shared_dedicated_queue_retry_task(
+ autoretry_for=(Exception,), retry_backoff=True, max_retries=1 if settings.DEBUG else None
+)
+def post_or_update_log_report_message_task(alert_group_pk, slack_team_identity_pk, update=False):
+ logger.debug(f"Start post_or_update_log_report_message_task for alert_group {alert_group_pk}")
+ AlertGroup = apps.get_model("alerts", "AlertGroup")
+ SlackTeamIdentity = apps.get_model("slack", "SlackTeamIdentity")
+ UpdateLogReportMessageStep = ScenarioStep.get_step("distribute_alerts", "UpdateLogReportMessageStep")
+
+ slack_team_identity = SlackTeamIdentity.objects.get(pk=slack_team_identity_pk)
+ alert_group = AlertGroup.all_objects.get(pk=alert_group_pk)
+ step = UpdateLogReportMessageStep(slack_team_identity, alert_group.channel.organization)
+
+ if alert_group.skip_escalation_in_slack or alert_group.channel.is_rate_limited_in_slack:
+ return
+
+ if update:  # flag to prevent posting the log message to Slack multiple times
+ step.update_log_message(alert_group)
+ else:
+ step.post_log_message(alert_group)
+ logger.debug(f"Finish post_or_update_log_report_message_task for alert_group {alert_group_pk}")
+
+
+@shared_dedicated_queue_retry_task(
+ autoretry_for=(Exception,), retry_backoff=True, max_retries=1 if settings.DEBUG else None
+)
+def post_slack_rate_limit_message(integration_id):
+ AlertReceiveChannel = apps.get_model("alerts", "AlertReceiveChannel")
+ integration = AlertReceiveChannel.objects.get(pk=integration_id)
+ if not compare_escalations(post_slack_rate_limit_message.request.id, integration.rate_limit_message_task_id):
+ logger.info(
+ f"post_slack_rate_limit_message. integration {integration_id}. ID mismatch. "
+ f"Active: {integration.rate_limit_message_task_id}"
+ )
+ return
+ default_route = integration.channel_filters.get(is_default=True)
+ slack_channel = default_route.slack_channel_id_or_general_log_id
+ if slack_channel:
+ text = (
+ f"Delivering and updating incidents of integration {integration.verbal_name} in Slack is "
+ f"temporarily stopped due to rate limit. You could find new incidents at "
+ f"<{integration.new_incidents_web_link}|web page "
+ '"Incidents">'
+ )
+ post_message_to_channel(integration.organization, slack_channel, text)
+
+
+@shared_dedicated_queue_retry_task(
+ autoretry_for=(Exception,), retry_backoff=True, max_retries=1 if settings.DEBUG else None
+)
+def populate_slack_usergroups():
+ SlackTeamIdentity = apps.get_model("slack", "SlackTeamIdentity")
+
+ slack_team_identities = SlackTeamIdentity.objects.filter(
+ detected_token_revoked__isnull=True,
+ ).exclude(slack_id=public_constants.DEMO_SLACK_TEAM_ID)
+
+ delay = 0
+ counter = 0
+
+ for qs in batch_queryset(slack_team_identities, 5000):
+ for slack_team_identity in qs:
+ counter += 1
+ # increase the delay to avoid hitting the Slack rate limit
+ if counter % 8 == 0:
+ delay += 60
+ populate_slack_usergroups_for_team.apply_async((slack_team_identity.pk,), countdown=delay)
+
+
+@shared_dedicated_queue_retry_task(
+ autoretry_for=(Exception,), retry_backoff=True, max_retries=1 if settings.DEBUG else None
+)
+def populate_slack_usergroups_for_team(slack_team_identity_id):
+ SlackTeamIdentity = apps.get_model("slack", "SlackTeamIdentity")
+ SlackUserGroup = apps.get_model("slack", "SlackUserGroup")
+
+ slack_team_identity = SlackTeamIdentity.objects.get(pk=slack_team_identity_id)
+ sc = SlackClientWithErrorHandling(slack_team_identity.bot_access_token)
+
+ def handle_usergroups_list_slack_api_exception(exception):
+ if exception.response["error"] == "plan_upgrade_required":
+ logger.info(f"SlackTeamIdentity with pk {slack_team_identity.pk} does not have access to User Groups")
+ elif exception.response["error"] == "invalid_auth":
+ logger.warning(f"invalid_auth, SlackTeamIdentity pk: {slack_team_identity.pk}")
+ # Slack's rate limit error is sometimes reported as 'rate_limited' and sometimes as 'ratelimited'
+ elif exception.response["error"] == "rate_limited" or exception.response["error"] == "ratelimited":
+ delay = random.randint(5, 25) * 60
+ logger.warning(
+ f"'usergroups.list' slack api error: rate_limited. SlackTeamIdentity pk: {slack_team_identity.pk}."
+ f"Delay populate_slack_usergroups_for_team task by {delay // 60} min."
+ )
+ return populate_slack_usergroups_for_team.apply_async((slack_team_identity_id,), countdown=delay)
+ elif exception.response["error"] == "missing_scope":
+ logger.warning(
+ f"'usergroups.users.list' slack api error: missing_scope. "
+ f"SlackTeamIdentity pk: {slack_team_identity.pk}.\n{exception}"
+ )
+ return
+ else:
+ logger.error(
+ f"'usergroups.list' slack api error. SlackTeamIdentity pk: {slack_team_identity.pk}\n{exception}"
+ )
+ raise exception
+
+ usergroups_list = None
+ bot_access_token_accepted = True
+ try:
+ usergroups_list = sc.api_call(
+ "usergroups.list",
+ )
+ except SlackAPITokenException as e:
+ logger.info(f"token revoked\n{e}")
+ except SlackAPIException as e:
+ if e.response["error"] == "not_allowed_token_type":
+ try:
+ # Retry the same request with the access token. This is required due to the migration to
+ # granular permissions and can be removed once clients reinstall their bots
+ sc_with_access_token = SlackClientWithErrorHandling(slack_team_identity.access_token)
+ usergroups_list = sc_with_access_token.api_call(
+ "usergroups.list",
+ )
+ bot_access_token_accepted = False
+ except SlackAPIException as err:
+ handle_usergroups_list_slack_api_exception(err)
+ else:
+ handle_usergroups_list_slack_api_exception(e)
+ if usergroups_list is not None:
+ today = timezone.now().date()
+ populated_user_groups_ids = slack_team_identity.usergroups.filter(last_populated=today).values_list(
+ "slack_id", flat=True
+ )
+
+ for usergroup in usergroups_list["usergroups"]:
+ # skip groups that were recently populated
+ if usergroup["id"] in populated_user_groups_ids:
+ continue
+ try:
+ if bot_access_token_accepted:
+ usergroups_users = sc.api_call(
+ "usergroups.users.list",
+ usergroup=usergroup["id"],
+ )
+ else:
+ sc_with_access_token = SlackClientWithErrorHandling(slack_team_identity.access_token)
+ usergroups_users = sc_with_access_token.api_call(
+ "usergroups.users.list",
+ usergroup=usergroup["id"],
+ )
+ except SlackAPIException as e:
+ if e.response["error"] == "no_such_subteam":
+ logger.info("User group does not exist")
+ elif e.response["error"] == "missing_scope":
+ logger.warning(
+ f"'usergroups.users.list' slack api error: missing_scope. "
+ f"SlackTeamIdentity pk: {slack_team_identity.pk}.\n{e}"
+ )
+ return
+ elif e.response["error"] == "invalid_auth":
+ logger.warning(f"invalid_auth, SlackTeamIdentity pk: {slack_team_identity.pk}")
+ # Slack's rate limit error is sometimes reported as 'rate_limited' and sometimes as 'ratelimited'
+ elif e.response["error"] == "rate_limited" or e.response["error"] == "ratelimited":
+ delay = random.randint(5, 25) * 60
+ logger.warning(
+ f"'usergroups.users.list' slack api error: rate_limited. "
+ f"SlackTeamIdentity pk: {slack_team_identity.pk}."
+ f"Delay populate_slack_usergroups_for_team task by {delay // 60} min."
+ )
+ return populate_slack_usergroups_for_team.apply_async((slack_team_identity_id,), countdown=delay)
+ else:
+ logger.error(
+ f"'usergroups.users.list' slack api error. "
+ f"SlackTeamIdentity pk: {slack_team_identity.pk}\n{e}"
+ )
+ raise e
+ else:
+ usergroup_name = usergroup["name"]
+ usergroup_handle = usergroup["handle"]
+ usergroup_members = usergroups_users["users"]
+ usergroup_is_active = usergroup["date_delete"] == 0
+
+ SlackUserGroup.objects.update_or_create(
+ slack_id=usergroup["id"],
+ slack_team_identity=slack_team_identity,
+ defaults={
+ "name": usergroup_name,
+ "handle": usergroup_handle,
+ "members": usergroup_members,
+ "is_active": usergroup_is_active,
+ "last_populated": today,
+ },
+ )
+
+
+@shared_dedicated_queue_retry_task()
+def start_update_slack_user_group_for_schedules():
+ SlackUserGroup = apps.get_model("slack", "SlackUserGroup")
+
+ user_group_pks = (
+ SlackUserGroup.objects.exclude(public_primary_key=DEMO_SLACK_USER_GROUP_ID)
+ .filter(oncall_schedules__isnull=False)
+ .distinct()
+ .values_list("pk", flat=True)
+ )
+
+ for user_group_pk in user_group_pks:
+ update_slack_user_group_for_schedules.delay(user_group_pk=user_group_pk)
+
+
+@shared_dedicated_queue_retry_task(autoretry_for=(Exception,), retry_backoff=True, max_retries=3)
+def update_slack_user_group_for_schedules(user_group_pk):
+ SlackUserGroup = apps.get_model("slack", "SlackUserGroup")
+
+ try:
+ user_group = SlackUserGroup.objects.get(pk=user_group_pk)
+ except SlackUserGroup.DoesNotExist:
+ logger.warning(f"Slack user group {user_group_pk} does not exist")
+ return
+
+ user_group.update_oncall_members()
+
+
+@shared_dedicated_queue_retry_task(
+ autoretry_for=(Exception,), retry_backoff=True, max_retries=1 if settings.DEBUG else None
+)
+def populate_slack_channels():
+ SlackTeamIdentity = apps.get_model("slack", "SlackTeamIdentity")
+
+ slack_team_identities = SlackTeamIdentity.objects.filter(
+ detected_token_revoked__isnull=True,
+ ).exclude(slack_id=public_constants.DEMO_SLACK_TEAM_ID)
+
+ delay = 0
+ counter = 0
+
+ for qs in batch_queryset(slack_team_identities, 5000):
+ for slack_team_identity in qs:
+ counter += 1
+ # increase the delay to avoid hitting the Slack rate limit
+ if counter % 8 == 0:
+ delay += 60
+ populate_slack_channels_for_team.apply_async((slack_team_identity.pk,), countdown=delay)
+
+
+@shared_dedicated_queue_retry_task(
+ autoretry_for=(Exception,), retry_backoff=True, max_retries=1 if settings.DEBUG else None
+)
+def populate_slack_channels_for_team(slack_team_identity_id):
+ SlackTeamIdentity = apps.get_model("slack", "SlackTeamIdentity")
+ SlackChannel = apps.get_model("slack", "SlackChannel")
+
+ slack_team_identity = SlackTeamIdentity.objects.get(pk=slack_team_identity_id)
+ sc = SlackClientWithErrorHandling(slack_team_identity.bot_access_token)
+
+ try:
+ response = sc.paginated_api_call(
+ "conversations.list", types="public_channel,private_channel", paginated_key="channels", limit=1000
+ )
+ except SlackAPITokenException as e:
+ logger.info(f"token revoked\n{e}")
+ except SlackAPIException as e:
+ if e.response["error"] == "invalid_auth":
+ logger.warning(
+ f"invalid_auth while populating slack channels, SlackTeamIdentity pk: {slack_team_identity.pk}"
+ )
+ # Slack's rate limit error is sometimes reported as 'rate_limited' and sometimes as 'ratelimited'
+ elif e.response["error"] == "rate_limited" or e.response["error"] == "ratelimited":
+ delay = random.randint(5, 25) * 60
+ logger.warning(
+ f"'conversations.list' slack api error: rate_limited. SlackTeamIdentity pk: {slack_team_identity.pk}."
+ f"Delay populate_slack_channels_for_team task by {delay//60} min."
+ )
+ return populate_slack_channels_for_team.apply_async((slack_team_identity_id,), countdown=delay)
+ elif e.response["error"] == "missing_scope":
+ logger.warning(
+ f"conversations.list' slack api error: missing_scope. "
+ f"SlackTeamIdentity pk: {slack_team_identity.pk}.\n{e}"
+ )
+ return
+ else:
+ logger.error(f"'conversations.list' slack api error. SlackTeamIdentity pk: {slack_team_identity.pk}\n{e}")
+ raise e
+ else:
+ today = timezone.now().date()
+
+ slack_channels = {channel["id"]: channel for channel in response["channels"]}
+ existing_channels = slack_team_identity.cached_channels.all()
+ existing_channel_ids = set(existing_channels.values_list("slack_id", flat=True))
+
+ # create missing channels
+ channels_to_create = tuple(
+ SlackChannel(
+ slack_team_identity=slack_team_identity,
+ slack_id=channel["id"],
+ name=channel["name"],
+ is_archived=channel["is_archived"],
+ is_shared=channel["is_shared"],
+ last_populated=today,
+ )
+ for channel in slack_channels.values()
+ if channel["id"] not in existing_channel_ids
+ )
+ SlackChannel.objects.bulk_create(channels_to_create, batch_size=5000)
+
+ # delete excess channels
+ channel_ids_to_delete = existing_channel_ids - slack_channels.keys()
+ slack_team_identity.cached_channels.filter(slack_id__in=channel_ids_to_delete).delete()
+
+ # update existing channels
+ channels_to_update = existing_channels.exclude(slack_id__in=channel_ids_to_delete)
+ for channel in channels_to_update:
+ slack_channel = slack_channels[channel.slack_id]
+ channel.name = slack_channel["name"]
+ channel.is_archived = slack_channel["is_archived"]
+ channel.is_shared = slack_channel["is_shared"]
+ channel.last_populated = today
+
+ SlackChannel.objects.bulk_update(
+ channels_to_update, fields=("name", "is_archived", "is_shared", "last_populated"), batch_size=5000
+ )
+
+
+@shared_dedicated_queue_retry_task(autoretry_for=(Exception,), retry_backoff=True, max_retries=0)
+def clean_slack_integration_leftovers(organization_id, *args, **kwargs):
+ """
+ This task removes bindings to Slack (e.g. a ChannelFilter's Slack channel) for a given organization.
+ It is used when a user changes the Slack integration.
+ """
+ ChannelFilter = apps.get_model("alerts", "ChannelFilter")
+ OnCallSchedule = apps.get_model("schedules", "OnCallSchedule")
+ logger.info(f"Start clean slack leftovers for organization {organization_id}")
+ ChannelFilter.objects.filter(alert_receive_channel__organization_id=organization_id).update(slack_channel_id=None)
+ logger.info(f"Cleaned ChannelFilters slack_channel_id for organization {organization_id}")
+ OnCallSchedule.objects.filter(organization_id=organization_id).update(channel=None)
+ logger.info(f"Cleaned OnCallSchedule slack_channel_id for organization {organization_id}")
+ logger.info(f"Finish clean slack leftovers for organization {organization_id}")
diff --git a/engine/apps/slack/templates/admin/slack_teams_summary_change_list.html b/engine/apps/slack/templates/admin/slack_teams_summary_change_list.html
new file mode 100644
index 0000000000..f9691f8f8c
--- /dev/null
+++ b/engine/apps/slack/templates/admin/slack_teams_summary_change_list.html
@@ -0,0 +1,76 @@
+{% extends "admin/change_list.html" %}
+{% block content_title %}
+ Slack Team Summary
+{% endblock %}
+{% block result_list %}
+
+
+
+ Daily Active Teams:
+
+
+ {% for x in summary_over_time %}
+
+
+ {{x.total | default:0 }}
+ {{x.period | date:"d/m/Y"}}
+
+
+ {% endfor %}
+
+
+
+
+ Registered Teams:
+
+
+ {% for x in registered_teams %}
+
+
+ {{x.total | default:0 }}
+ {{x.period | date:"d/m/Y"}}
+
+
+ {% endfor %}
+
+
+
+
+{% endblock %}
+{% block pagination %}{% endblock %}
\ No newline at end of file
diff --git a/engine/apps/slack/tests/__init__.py b/engine/apps/slack/tests/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/engine/apps/slack/tests/conftest.py b/engine/apps/slack/tests/conftest.py
new file mode 100644
index 0000000000..ff8057ee1a
--- /dev/null
+++ b/engine/apps/slack/tests/conftest.py
@@ -0,0 +1,12 @@
+import pytest
+
+
+@pytest.fixture
+def get_slack_team_and_slack_user(make_organization_and_user_with_slack_identities):
+ def _make_slack_team_and_slack_user(organization, user):
+ slack_team_identity = organization.slack_team_identity
+ slack_user_identity = user.slack_user_identity
+
+ return slack_team_identity, slack_user_identity
+
+ return _make_slack_team_and_slack_user
diff --git a/engine/apps/slack/tests/factories.py b/engine/apps/slack/tests/factories.py
new file mode 100644
index 0000000000..77506fc095
--- /dev/null
+++ b/engine/apps/slack/tests/factories.py
@@ -0,0 +1,59 @@
+import factory
+
+from apps.slack.models import (
+ SlackActionRecord,
+ SlackChannel,
+ SlackMessage,
+ SlackTeamIdentity,
+ SlackUserGroup,
+ SlackUserIdentity,
+)
+from common.utils import UniqueFaker
+
+
+class SlackTeamIdentityFactory(factory.DjangoModelFactory):
+ slack_id = UniqueFaker("word")
+ cached_name = factory.Faker("word")
+
+ class Meta:
+ model = SlackTeamIdentity
+
+
+class SlackUserIdentityFactory(factory.DjangoModelFactory):
+ slack_id = UniqueFaker("word")
+ cached_avatar = "TEST_SLACK_IMAGE_URL"
+ cached_name = "TEST_SLACK_NAME"
+ cached_slack_login = "TEST_SLACK_LOGIN"
+
+ class Meta:
+ model = SlackUserIdentity
+
+
+class SlackUserGroupFactory(factory.DjangoModelFactory):
+ slack_id = UniqueFaker("word")
+ name = factory.Faker("word")
+ handle = factory.Faker("word")
+
+ class Meta:
+ model = SlackUserGroup
+
+
+class SlackChannelFactory(factory.DjangoModelFactory):
+ slack_id = UniqueFaker("word")
+ name = factory.Faker("word")
+
+ class Meta:
+ model = SlackChannel
+
+
+class SlackMessageFactory(factory.DjangoModelFactory):
+ slack_id = UniqueFaker("word")
+ channel_id = factory.Faker("word")
+
+ class Meta:
+ model = SlackMessage
+
+
+class SlackActionRecordFactory(factory.DjangoModelFactory):
+ class Meta:
+ model = SlackActionRecord
diff --git a/engine/apps/slack/tests/test_create_message_blocks.py b/engine/apps/slack/tests/test_create_message_blocks.py
new file mode 100644
index 0000000000..638c99da4f
--- /dev/null
+++ b/engine/apps/slack/tests/test_create_message_blocks.py
@@ -0,0 +1,56 @@
+from apps.slack.utils import create_message_blocks
+
+
+def test_long_text():
+ original_text = "1" * 3000 + "\n" + "2" * 3000 + "\n" + "3" * 3000
+
+ message_block_dict = [
+ {
+ "type": "section",
+ "text": {"type": "mrkdwn", "text": "1" * 3000 + "```"},
+ },
+ {
+ "type": "section",
+ "text": {
+ "type": "mrkdwn",
+ "text": "```" + "2" * 3000 + "```",
+ },
+ },
+ {
+ "type": "section",
+ "text": {
+ "type": "mrkdwn",
+ "text": "```" + "3" * 3000 + "```",
+ },
+ },
+ ]
+ assert message_block_dict == create_message_blocks(original_text)
+
+
+def test_truncation_long_text():
+ original_text = "t" * 3000 + "\n" + "truncated"
+
+ expected_message_blocks = [
+ {
+ "type": "section",
+ "text": {
+ "type": "mrkdwn",
+ "text": "t" * 3000 + "```",
+ },
+ },
+ {
+ "type": "section",
+ "text": {"type": "mrkdwn", "text": "```truncated```"},
+ },
+ ]
+ message_blocks = create_message_blocks(original_text)
+ assert expected_message_blocks == message_blocks
+
+
+def test_short_text():
+ """Any short text test case"""
+
+ original_text = "test" * 100
+
+ message_block_dict = [{"type": "section", "text": {"type": "mrkdwn", "text": original_text}}]
+ assert message_block_dict == create_message_blocks(original_text)
diff --git a/engine/apps/slack/tests/test_parse_slack_usernames.py b/engine/apps/slack/tests/test_parse_slack_usernames.py
new file mode 100644
index 0000000000..df43c950a2
--- /dev/null
+++ b/engine/apps/slack/tests/test_parse_slack_usernames.py
@@ -0,0 +1,56 @@
+from apps.schedules.ical_utils import parse_username_from_string
+
+
+def test_one_username():
+ assert parse_username_from_string("bob") == "bob"
+
+
+def test_mixed_languages_username():
+ assert parse_username_from_string("bobиванtannhäuser夕海") == "bobиванtannhäuser夕海"
+
+
+def test_username_with_spaces():
+ assert parse_username_from_string("bob smith") == "bob smith"
+ assert parse_username_from_string(" bob smith ") == "bob smith"
+
+
+def test_username_with_hyphen():
+ assert parse_username_from_string("bob-smith") == "bob-smith"
+
+
+def test_username_with_punctuation():
+ assert parse_username_from_string("bob-smith") == "bob-smith"
+ assert parse_username_from_string("bob.smith") == "bob.smith"
+ assert parse_username_from_string("bob'smith") == "bob'smith"
+ assert parse_username_from_string("bob;smith") == "bob;smith"
+ assert parse_username_from_string("bob,smith") == "bob,smith"
+ assert parse_username_from_string("bob/smith") == "bob/smith"
+ assert parse_username_from_string("bob)([]{}") == "bob)([]{}"
+
+
+def test_non_space_delimiter():
+ assert parse_username_from_string("@bob:@alex") == "@bob:@alex"
+ assert parse_username_from_string("@bob@@alex") == "@bob@@alex"
+ assert parse_username_from_string("@bob@alex") == "@bob@alex"
+
+
+def test_numeric_username():
+ assert parse_username_from_string("bob1") == "bob1"
+ assert parse_username_from_string("1") == "1"
+
+
+def test_email_address_username():
+ assert parse_username_from_string("bob@bob.com") == "bob@bob.com"
+
+
+def test_grafana_username():
+ assert parse_username_from_string("!@#%^&*()_+[];',./\\|") == "!@#%^&*()_+[];',./\\|"
+
+
+def test_remove_priority_from_username():
+ assert parse_username_from_string("[L1]bob") == "bob"
+ assert parse_username_from_string("[L1] bob") == "bob"
+ assert parse_username_from_string(" [L1] bob ") == "bob"
+ assert parse_username_from_string("[L2] bob[L1]") == "bob[L1]"
+ assert parse_username_from_string("[L27]bob") == "[L27]bob"
+ assert parse_username_from_string("[[L2]] bob[[[L1]") == "[[L2]] bob[[[L1]"
diff --git a/engine/apps/slack/tests/test_populate_slack_channels.py b/engine/apps/slack/tests/test_populate_slack_channels.py
new file mode 100644
index 0000000000..42f031aea5
--- /dev/null
+++ b/engine/apps/slack/tests/test_populate_slack_channels.py
@@ -0,0 +1,47 @@
+from unittest.mock import patch
+
+import pytest
+from django.utils import timezone
+
+from apps.slack.slack_client import SlackClientWithErrorHandling
+from apps.slack.tasks import populate_slack_channels_for_team
+
+
+@pytest.mark.django_db
+def test_populate_slack_channels_for_team(make_organization_with_slack_team_identity, make_slack_channel):
+ organization, slack_team_identity = make_organization_with_slack_team_identity()
+
+ yesterday = (timezone.now() - timezone.timedelta(days=1)).date()
+ _ = tuple(
+ make_slack_channel(
+ slack_team_identity=slack_team_identity, slack_id=slack_id, name=name, last_populated=yesterday
+ )
+ for slack_id, name in (
+ ("C111111111", "test1"),
+ ("C222222222", "test2"),
+ ("C444444444", "test4"),
+ )
+ )
+
+ response = {
+ "channels": (
+ {"id": "C111111111", "name": "test1", "is_archived": False, "is_shared": False},
+ {"id": "C222222222", "name": "test_changed_name", "is_archived": False, "is_shared": True},
+ {"id": "C333333333", "name": "test3", "is_archived": False, "is_shared": True},
+ )
+ }
+ with patch.object(SlackClientWithErrorHandling, "paginated_api_call", return_value=response):
+ populate_slack_channels_for_team(slack_team_identity.pk)
+
+ channels = slack_team_identity.cached_channels.all()
+
+ expected_channel_ids = set(channel["id"] for channel in response["channels"])
+ actual_channel_ids = set(channels.values_list("slack_id", flat=True))
+ assert expected_channel_ids == actual_channel_ids
+
+ assert not channels.filter(slack_id="C444444444").exists()
+
+ second_channel = channels.get(slack_id="C222222222")
+ assert second_channel.name == "test_changed_name"
+
+ assert not channels.filter(last_populated__lte=yesterday).exists()
diff --git a/engine/apps/slack/tests/test_reset_slack.py b/engine/apps/slack/tests/test_reset_slack.py
new file mode 100644
index 0000000000..229f153416
--- /dev/null
+++ b/engine/apps/slack/tests/test_reset_slack.py
@@ -0,0 +1,34 @@
+from unittest.mock import patch
+
+import pytest
+from django.conf import settings
+from django.urls import reverse
+from rest_framework import status
+from rest_framework.response import Response
+from rest_framework.test import APIClient
+
+from common.constants.role import Role
+
+
+@pytest.mark.django_db
+@pytest.mark.parametrize(
+ "role,expected_status",
+ [
+ (Role.ADMIN, status.HTTP_200_OK),
+ (Role.EDITOR, status.HTTP_403_FORBIDDEN),
+ (Role.VIEWER, status.HTTP_403_FORBIDDEN),
+ ],
+)
+def test_reset_slack_integration_permissions(
+ make_organization_and_user_with_plugin_token, role, expected_status, load_slack_urls, make_user_auth_headers
+):
+ settings.FEATURE_SLACK_INTEGRATION_ENABLED = True
+
+ _, user, token = make_organization_and_user_with_plugin_token(role)
+ client = APIClient()
+
+ url = reverse("reset-slack")
+ with patch("apps.slack.views.ResetSlackView.post", return_value=Response(status=status.HTTP_200_OK)):
+ response = client.post(url, format="json", **make_user_auth_headers(user, token))
+
+ assert response.status_code == expected_status
diff --git a/engine/apps/slack/tests/test_scenario_steps/__init__.py b/engine/apps/slack/tests/test_scenario_steps/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/engine/apps/slack/tests/test_scenario_steps/test_distribute_alerts.py b/engine/apps/slack/tests/test_scenario_steps/test_distribute_alerts.py
new file mode 100644
index 0000000000..053b7f74e6
--- /dev/null
+++ b/engine/apps/slack/tests/test_scenario_steps/test_distribute_alerts.py
@@ -0,0 +1,35 @@
+from unittest.mock import patch
+
+import pytest
+
+from apps.alerts.models import AlertGroup
+from apps.slack.models import SlackMessage
+from apps.slack.scenarios.scenario_step import ScenarioStep
+from apps.slack.slack_client.exceptions import SlackAPIException
+
+
+@pytest.mark.django_db
+def test_restricted_action_error(
+ make_organization_and_user_with_slack_identities,
+ make_alert_receive_channel,
+ make_alert_group,
+ make_alert,
+):
+ SlackAlertShootingStep = ScenarioStep.get_step("distribute_alerts", "AlertShootingStep")
+ organization, _, slack_team_identity, _ = make_organization_and_user_with_slack_identities()
+ alert_receive_channel = make_alert_receive_channel(organization)
+ alert_group = make_alert_group(alert_receive_channel)
+ alert = make_alert(alert_group, raw_request_data="{}")
+
+ step = SlackAlertShootingStep(slack_team_identity)
+
+ with patch.object(step._slack_client, "api_call") as mock_slack_api_call:
+ mock_slack_api_call.side_effect = SlackAPIException(response={"error": "restricted_action"})
+ step.publish_slack_messages(slack_team_identity, alert_group, alert, None, "channel-id", [])
+
+ alert_group.refresh_from_db()
+ alert.refresh_from_db()
+ assert alert_group.reason_to_skip_escalation == AlertGroup.RESTRICTED_ACTION
+ assert alert_group.slack_message is None
+ assert SlackMessage.objects.count() == 0
+ assert not alert.delivered
diff --git a/engine/apps/slack/tests/test_scenario_steps/test_resolution_note.py b/engine/apps/slack/tests/test_scenario_steps/test_resolution_note.py
new file mode 100644
index 0000000000..080f5d40e5
--- /dev/null
+++ b/engine/apps/slack/tests/test_scenario_steps/test_resolution_note.py
@@ -0,0 +1,103 @@
+import json
+from urllib.parse import urljoin
+
+import pytest
+from django.conf import settings
+
+from apps.slack.scenarios.scenario_step import ScenarioStep
+
+
+@pytest.mark.django_db
+def test_get_resolution_notes_blocks_default_if_empty(
+ make_organization_and_user_with_slack_identities,
+ make_alert_receive_channel,
+ make_alert_group,
+):
+ SlackResolutionNoteModalStep = ScenarioStep.get_step("resolution_note", "ResolutionNoteModalStep")
+ organization, _, slack_team_identity, _ = make_organization_and_user_with_slack_identities()
+ step = SlackResolutionNoteModalStep(slack_team_identity)
+
+ alert_receive_channel = make_alert_receive_channel(organization)
+ alert_group = make_alert_group(alert_receive_channel)
+
+ blocks = step.get_resolution_notes_blocks(alert_group, "", False)
+
+ link_to_instruction = urljoin(settings.BASE_URL, "static/images/postmortem.gif")
+ expected_blocks = [
+ {
+ "type": "divider",
+ },
+ {
+ "type": "section",
+ "text": {
+ "type": "mrkdwn",
+ "text": ":bulb: You can add a message to the resolution notes via context menu:",
+ },
+ },
+ {
+ "type": "image",
+ "title": {
+ "type": "plain_text",
+ "text": "Add a resolution note",
+ },
+ "image_url": link_to_instruction,
+ "alt_text": "Add to postmortem context menu",
+ },
+ ]
+ assert blocks == expected_blocks
+
+
+@pytest.mark.django_db
+def test_get_resolution_notes_blocks_non_empty(
+ make_organization_and_user_with_slack_identities,
+ make_alert_receive_channel,
+ make_alert_group,
+ make_resolution_note_slack_message,
+):
+ SlackResolutionNoteModalStep = ScenarioStep.get_step("resolution_note", "ResolutionNoteModalStep")
+ organization, user, slack_team_identity, _ = make_organization_and_user_with_slack_identities()
+ step = SlackResolutionNoteModalStep(slack_team_identity)
+
+ alert_receive_channel = make_alert_receive_channel(organization)
+ alert_group = make_alert_group(alert_receive_channel)
+ resolution_note = make_resolution_note_slack_message(alert_group=alert_group, user=user, added_by_user=user, ts=1)
+
+ blocks = step.get_resolution_notes_blocks(alert_group, "", False)
+
+ expected_blocks = [
+ {
+ "type": "divider",
+ },
+ {
+ "type": "section",
+ "text": {
+ "type": "mrkdwn",
+ "text": "{} \n{}".format(
+ resolution_note.user.get_user_verbal_for_team_for_slack(mention=True),
+ float(resolution_note.ts),
+ resolution_note.text,
+ ),
+ },
+ "accessory": {
+ "type": "button",
+ "style": "primary",
+ "text": {
+ "type": "plain_text",
+ "text": "Add",
+ "emoji": True,
+ },
+ "action_id": "AddRemoveThreadMessageStep",
+ "value": json.dumps(
+ {
+ "resolution_note_window_action": "edit",
+ "msg_value": "add",
+ "message_pk": resolution_note.pk,
+ "resolution_note_pk": None,
+ "alert_group_pk": alert_group.pk,
+ }
+ ),
+ },
+ },
+ ]
+
+ assert blocks == expected_blocks
diff --git a/engine/apps/slack/tests/test_scenario_steps/test_slack_usergroup_steps.py b/engine/apps/slack/tests/test_scenario_steps/test_slack_usergroup_steps.py
new file mode 100644
index 0000000000..b0e0c8ed74
--- /dev/null
+++ b/engine/apps/slack/tests/test_scenario_steps/test_slack_usergroup_steps.py
@@ -0,0 +1,120 @@
+import pytest
+
+from apps.slack.models import SlackUserGroup
+from apps.slack.scenarios.scenario_step import ScenarioStep
+
+
+def get_user_group_event_payload(slack_team_identity, slack_user_identity):
+ slack_team_id = slack_team_identity.slack_id
+ slack_user_id = slack_user_identity.slack_id
+ payload = {
+ "team_id": slack_team_id,
+ "event": {
+ "type": "subteam_updated",
+ "subteam": {
+ "id": "S017H64MD5K",
+ "team_id": slack_team_id,
+ "is_usergroup": True,
+ "is_subteam": True,
+ "name": "Test User Group",
+ "description": "",
+ "handle": "test-user-group",
+ "is_external": False,
+ "date_create": 1595430081,
+ "date_update": 1595913736,
+ "date_delete": 0,
+ "auto_type": None,
+ "auto_provision": False,
+ "enterprise_subteam_id": "",
+ "created_by": slack_user_id,
+ "updated_by": slack_user_id,
+ "deleted_by": None,
+ "prefs": {"channels": [], "groups": []},
+ "users": [slack_user_id],
+ "user_count": 1,
+ "channel_count": 0,
+ },
+ "event_ts": "1595924845.008900",
+ },
+ "type": "event_callback",
+ "event_id": "Ev017UPSP1AP",
+ "event_time": 1595924845,
+ "authed_users": ["W0188BV77AL"],
+ }
+ return payload
+
+
+def get_user_group_members_changed_event_payload(slack_team_identity, slack_user_identity, user_group):
+ slack_team_id = slack_team_identity.slack_id
+ slack_user_id = slack_user_identity.slack_id
+ slack_user_group_id = user_group.slack_id
+ payload = {
+ "team_id": slack_team_id,
+ "event": {
+ "type": "subteam_members_changed",
+ "subteam_id": slack_user_group_id,
+ "team_id": slack_team_id,
+ "date_previous_update": 1446670362,
+ "date_update": 1492906952,
+ "added_users": [slack_user_id],
+ "added_users_count": "3",
+ "removed_users": user_group.members,
+ "removed_users_count": "1",
+ "event_ts": "1595924845.008900",
+ },
+ "type": "event_callback",
+ "event_id": "Ev017UPSP1AP",
+ "event_time": 1595924845,
+ "authed_users": [slack_user_id],
+ }
+ return payload
+
+
+@pytest.mark.django_db
+def test_slack_user_group_event_step(
+ make_organization_and_user_with_slack_identities,
+ get_slack_team_and_slack_user,
+):
+ SlackUserGroupEventStep = ScenarioStep.get_step("slack_usergroup", "SlackUserGroupEventStep")
+
+ organization, user, _, _ = make_organization_and_user_with_slack_identities()
+ slack_team_identity, slack_user_identity = get_slack_team_and_slack_user(organization, user)
+ step = SlackUserGroupEventStep(slack_team_identity)
+ payload = get_user_group_event_payload(slack_team_identity, slack_user_identity)
+
+ step.process_scenario(slack_user_identity, slack_team_identity, payload)
+
+ user_group = SlackUserGroup.objects.filter(slack_id=payload["event"]["subteam"]["id"]).first()
+
+ assert user_group is not None
+ assert user_group.slack_team_identity == slack_team_identity
+ assert user_group.slack_id == payload["event"]["subteam"]["id"]
+ assert user_group.name == payload["event"]["subteam"]["name"]
+ assert user_group.handle == payload["event"]["subteam"]["handle"]
+ assert user_group.members == payload["event"]["subteam"]["users"]
+ assert user_group.members == [slack_user_identity.slack_id]
+ assert user_group.is_active == int(payload["event"]["subteam"]["date_delete"] == 0)
+
+
+@pytest.mark.django_db
+def test_slack_user_group_members_changed_event_step(
+ make_organization_and_user_with_slack_identities,
+ make_slack_user_group,
+):
+ SlackUserGroupMembersChangedEventStep = ScenarioStep.get_step(
+ "slack_usergroup", "SlackUserGroupMembersChangedEventStep"
+ )
+
+ organization, user, slack_team_identity, slack_user_identity = make_organization_and_user_with_slack_identities()
+ step = SlackUserGroupMembersChangedEventStep(slack_team_identity)
+ user_group_members = ["TESTUSER1", "TESTUSER2"]
+ user_group = make_slack_user_group(slack_team_identity=slack_team_identity, members=user_group_members)
+ assert user_group.members == user_group_members
+
+ # this payload removes existing group members (user_group_members)
+ # and adds slack_user_identity.slack_id as new member
+ payload = get_user_group_members_changed_event_payload(slack_team_identity, slack_user_identity, user_group)
+
+ step.process_scenario(slack_user_identity, slack_team_identity, payload)
+ user_group.refresh_from_db()
+ assert user_group.members == [slack_user_identity.slack_id]
diff --git a/engine/apps/slack/tests/test_user_group.py b/engine/apps/slack/tests/test_user_group.py
new file mode 100644
index 0000000000..e03d1a727d
--- /dev/null
+++ b/engine/apps/slack/tests/test_user_group.py
@@ -0,0 +1,69 @@
+from unittest.mock import PropertyMock, patch
+
+import pytest
+
+from apps.schedules.models.on_call_schedule import OnCallScheduleQuerySet
+from apps.slack.models import SlackUserGroup
+from apps.slack.slack_client import SlackClientWithErrorHandling
+
+
+@pytest.mark.django_db
+def test_update_members(make_organization_with_slack_team_identity, make_slack_user_group):
+ organization, slack_team_identity = make_organization_with_slack_team_identity()
+ user_group = make_slack_user_group(slack_team_identity)
+
+ slack_ids = ["slack_id_1", "slack_id_2"]
+
+ with patch.object(SlackClientWithErrorHandling, "api_call") as mock:
+ user_group.update_members(slack_ids)
+ mock.assert_called()
+
+ assert user_group.members == slack_ids
+
+
+@pytest.mark.django_db
+def test_oncall_slack_user_identities(
+ make_organization_with_slack_team_identity,
+ make_slack_user_group,
+ make_user_with_slack_user_identity,
+ make_user_for_organization,
+):
+ organization, slack_team_identity = make_organization_with_slack_team_identity()
+ user_group = make_slack_user_group(slack_team_identity)
+
+ user_1, slack_user_identity_1 = make_user_with_slack_user_identity(
+ slack_team_identity, organization, slack_id="user_1"
+ )
+ user_2, slack_user_identity_2 = make_user_with_slack_user_identity(
+ slack_team_identity, organization, slack_id="user_2"
+ )
+ user_3 = make_user_for_organization(organization)
+
+ with patch.object(OnCallScheduleQuerySet, "get_oncall_users", return_value=[user_1, user_2, user_3]):
+ assert user_group.oncall_slack_user_identities == [slack_user_identity_1, slack_user_identity_2]
+
+
+@pytest.mark.django_db
+def test_update_oncall_members(
+ make_organization_with_slack_team_identity,
+ make_slack_user_group,
+ make_user_with_slack_user_identity,
+):
+ organization, slack_team_identity = make_organization_with_slack_team_identity()
+ user_group = make_slack_user_group(slack_team_identity)
+
+ user_1, slack_user_identity_1 = make_user_with_slack_user_identity(
+ slack_team_identity, organization, slack_id="slack_id_1"
+ )
+ user_2, slack_user_identity_2 = make_user_with_slack_user_identity(
+ slack_team_identity, organization, slack_id="slack_id_2"
+ )
+
+ with patch.object(
+ SlackUserGroup, "oncall_slack_user_identities", new_callable=PropertyMock
+ ) as oncall_slack_user_identities_mock:
+ oncall_slack_user_identities_mock.return_value = [slack_user_identity_1, slack_user_identity_2]
+
+ with patch.object(SlackUserGroup, "update_members") as update_members_mock:
+ user_group.update_oncall_members()
+ update_members_mock.assert_called()
diff --git a/engine/apps/slack/urls.py b/engine/apps/slack/urls.py
new file mode 100644
index 0000000000..5584fd1301
--- /dev/null
+++ b/engine/apps/slack/urls.py
@@ -0,0 +1,24 @@
+from django.urls import path
+
+from .views import (
+ InstallLinkRedirectView,
+ OAuthSlackView,
+ ResetSlackView,
+ SignupRedirectView,
+ SlackEventApiEndpointView,
+ StopAnalyticsReporting,
+)
+
+urlpatterns = [
+ path("event_api_endpoint/", SlackEventApiEndpointView.as_view()),
+ path("interactive_api_endpoint/", SlackEventApiEndpointView.as_view()),
+ path("oauth/", OAuthSlackView.as_view()),
+ path("oauth///", OAuthSlackView.as_view()),
+ path("install_redirect/", InstallLinkRedirectView.as_view()),
+ path("install_redirect///", InstallLinkRedirectView.as_view()),
+ path("signup_redirect/", SignupRedirectView.as_view()),
+ path("signup_redirect///", SignupRedirectView.as_view()),
+ path("stop_analytics_reporting/", StopAnalyticsReporting.as_view()),
+ # The trailing / is missing here on purpose. QA the feature if you want to add it. It is unclear why it does not work with one.
+ path("reset_slack", ResetSlackView.as_view(), name="reset-slack"),
+]
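Reviewer note: the <str:subscription>/<str:utm> segments above line up with the view signatures in views.py (subscription="free", utm="not_specified"). For illustration, assuming this urlconf is mounted under /slack/ (the mount point is an assumption, not part of this patch), a request resolves like this:

    # illustrative only
    from django.urls import resolve

    match = resolve("/slack/oauth/free/not_specified/")
    assert match.kwargs == {"subscription": "free", "utm": "not_specified"}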
diff --git a/engine/apps/slack/utils.py b/engine/apps/slack/utils.py
new file mode 100644
index 0000000000..d206a2fb4e
--- /dev/null
+++ b/engine/apps/slack/utils.py
@@ -0,0 +1,66 @@
+from textwrap import wrap
+
+from apps.slack.slack_client import SlackClientWithErrorHandling
+from apps.slack.slack_client.exceptions import SlackAPIException
+
+
+def create_message_blocks(text):
+ """This function checks text and return blocks
+
+ The maximum length for the text in a section is 3000 characters, and
+ up to 50 blocks can be included in each message.
+ https://api.slack.com/reference/block-kit/blocks#section
+
+ :param str text: Text for message blocks
+ :return list blocks: Blocks list
+ """
+
+ if len(text) <= 3000:
+ blocks = [{"type": "section", "text": {"type": "mrkdwn", "text": text}}]
+ else:
+ splitted_text_list = text.split("```\n")
+
+ if len(splitted_text_list) > 1:
+ splitted_text_list.pop()
+
+ blocks = []
+
+ for splitted_text in splitted_text_list:
+
+ if len(splitted_text) > 2996:
+ # too long text case
+ text_list = wrap(
+ splitted_text, 2994, expand_tabs=False, replace_whitespace=False, break_long_words=False
+ )
+
+ blocks.append({"type": "section", "text": {"type": "mrkdwn", "text": f"{text_list[0]}```"}})
+
+ for text_item in text_list[1:]:
+ blocks.append(
+ {"type": "section", "text": {"type": "mrkdwn", "text": f'```{text_item.strip("```")}```'}}
+ )
+ else:
+ blocks.append({"type": "section", "text": {"type": "mrkdwn", "text": splitted_text + "```\n"}})
+
+ return blocks
+
+
+def post_message_to_channel(organization, channel_id, text):
+ if organization.slack_team_identity:
+ slack_client = SlackClientWithErrorHandling(organization.slack_team_identity.bot_access_token)
+ try:
+ slack_client.api_call("chat.postMessage", channel=channel_id, text=text)
+ except SlackAPIException as e:
+ if e.response["error"] == "channel_not_found":
+ pass
+ else:
+ raise e
+
+
+def format_datetime_to_slack(timestamp, format="date_short"):
+ return f""
+
+
+def get_cache_key_update_incident_slack_message(alert_group_pk):
+ CACHE_KEY_PREFIX = "update_incident_slack_message"
+ return f"{CACHE_KEY_PREFIX}_{alert_group_pk}"
diff --git a/engine/apps/slack/views.py b/engine/apps/slack/views.py
new file mode 100644
index 0000000000..8988594c03
--- /dev/null
+++ b/engine/apps/slack/views.py
@@ -0,0 +1,545 @@
+import hashlib
+import hmac
+import json
+import logging
+from typing import Optional
+
+from django.conf import settings
+from django.http import HttpResponse
+from rest_framework import status
+from rest_framework.permissions import IsAuthenticated
+from rest_framework.response import Response
+from rest_framework.views import APIView
+
+from apps.api.permissions import IsAdmin, MethodPermission
+from apps.auth_token.auth import PluginAuthentication
+from apps.base.utils import live_settings
+from apps.slack.scenarios.alertgroup_appearance import STEPS_ROUTING as ALERTGROUP_APPEARANCE_ROUTING
+from apps.slack.scenarios.distribute_alerts import STEPS_ROUTING as DISTRIBUTION_STEPS_ROUTING
+
+# Importing routes from scenarios
+from apps.slack.scenarios.onboarding import STEPS_ROUTING as ONBOARDING_STEPS_ROUTING
+from apps.slack.scenarios.profile_update import STEPS_ROUTING as PROFILE_UPDATE_ROUTING
+from apps.slack.scenarios.public_menu import STEPS_ROUTING as PUBLIC_MENU_ROUTING
+from apps.slack.scenarios.resolution_note import STEPS_ROUTING as RESOLUTION_NOTE_ROUTING
+from apps.slack.scenarios.scenario_step import (
+ EVENT_SUBTYPE_BOT_MESSAGE,
+ EVENT_SUBTYPE_FILE_SHARE,
+ EVENT_SUBTYPE_MESSAGE_CHANGED,
+ EVENT_SUBTYPE_MESSAGE_DELETED,
+ EVENT_TYPE_APP_MENTION,
+ EVENT_TYPE_MESSAGE,
+ EVENT_TYPE_MESSAGE_CHANNEL,
+ EVENT_TYPE_SUBTEAM_CREATED,
+ EVENT_TYPE_SUBTEAM_MEMBERS_CHANGED,
+ EVENT_TYPE_SUBTEAM_UPDATED,
+ EVENT_TYPE_USER_CHANGE,
+ PAYLOAD_TYPE_BLOCK_ACTIONS,
+ PAYLOAD_TYPE_DIALOG_SUBMISSION,
+ PAYLOAD_TYPE_EVENT_CALLBACK,
+ PAYLOAD_TYPE_INTERACTIVE_MESSAGE,
+ PAYLOAD_TYPE_MESSAGE_ACTION,
+ PAYLOAD_TYPE_SLASH_COMMAND,
+ PAYLOAD_TYPE_VIEW_SUBMISSION,
+ ScenarioStep,
+)
+from apps.slack.scenarios.schedules import STEPS_ROUTING as SCHEDULES_ROUTING
+from apps.slack.scenarios.slack_channel import STEPS_ROUTING as CHANNEL_ROUTING
+from apps.slack.scenarios.slack_channel_integration import STEPS_ROUTING as SLACK_CHANNEL_INTEGRATION_ROUTING
+from apps.slack.scenarios.slack_usergroup import STEPS_ROUTING as SLACK_USERGROUP_UPDATE_ROUTING
+from apps.slack.slack_client import SlackClientWithErrorHandling
+from apps.slack.slack_client.exceptions import SlackAPIException, SlackAPITokenException
+from apps.slack.tasks import clean_slack_integration_leftovers, unpopulate_slack_user_identities
+from apps.user_management.organization_log_creator import OrganizationLogType, create_organization_log
+
+from .models import SlackActionRecord, SlackMessage, SlackTeamIdentity, SlackUserIdentity
+
+SCENARIOS_ROUTES = [] # Add all other routes here
+SCENARIOS_ROUTES.extend(ONBOARDING_STEPS_ROUTING)
+SCENARIOS_ROUTES.extend(DISTRIBUTION_STEPS_ROUTING)
+SCENARIOS_ROUTES.extend(PUBLIC_MENU_ROUTING)
+SCENARIOS_ROUTES.extend(SCHEDULES_ROUTING)
+SCENARIOS_ROUTES.extend(SLACK_CHANNEL_INTEGRATION_ROUTING)
+SCENARIOS_ROUTES.extend(ALERTGROUP_APPEARANCE_ROUTING)
+SCENARIOS_ROUTES.extend(RESOLUTION_NOTE_ROUTING)
+SCENARIOS_ROUTES.extend(SLACK_USERGROUP_UPDATE_ROUTING)
+SCENARIOS_ROUTES.extend(CHANNEL_ROUTING)
+SCENARIOS_ROUTES.extend(PROFILE_UPDATE_ROUTING)
+
+logger = logging.getLogger(__name__)
+
+
+class StopAnalyticsReporting(APIView):
+ def get(self, request):
+ response = HttpResponse(
+ "Your app installation would not be tracked by analytics from backend, "
+ "use browser plugin to disable from a frontend side. "
+ )
+ response.set_cookie("no_track", True, max_age=10 * 360 * 24 * 60 * 60)
+ return response
+
+
+class InstallLinkRedirectView(APIView):
+ def get(self, request, subscription="free", utm="not_specified"):
+ return HttpResponse(("Sign up is not allowed"), status=status.HTTP_400_BAD_REQUEST)
+
+
+class SignupRedirectView(APIView):
+ def get(self, request, subscription="free", utm="not_specified"):
+ return HttpResponse(("Sign up is not allowed"), status=status.HTTP_400_BAD_REQUEST)
+
+
+class OAuthSlackView(APIView):
+ def get(self, request, format=None, subscription="free", utm="not_specified"):
+ return HttpResponse(("Sign up is not allowed"), status=status.HTTP_400_BAD_REQUEST)
+
+
+class SlackEventApiEndpointView(APIView):
+ @staticmethod
+ def verify_signature(timestamp, signature, body, secret):
+ # https://github.com/slackapi/python-slack-events-api/blob/master/slackeventsapi/server.py#L47
+
+ if hasattr(hmac, "compare_digest"):
+ req = str.encode("v0:" + str(timestamp) + ":") + body
+ request_hash = "v0=" + hmac.new(str.encode(secret), req, hashlib.sha256).hexdigest()
+ return hmac.compare_digest(request_hash, signature)
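+
+ # Illustrative sketch (values are made up): a request signed the same way on the
+ # client side passes verify_signature above:
+ # body = b'{"type": "url_verification"}'
+ # ts = "1531420618"
+ # req = str.encode("v0:" + ts + ":") + body
+ # sig = "v0=" + hmac.new(str.encode(secret), req, hashlib.sha256).hexdigest()
+ # SlackEventApiEndpointView.verify_signature(ts, sig, body, secret) # -> True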
+
+ def get(self, request, format=None):
+ return Response("hello")
+
+ def post(self, request):
+ logger.info("Request id: {}".format(request.META.get("HTTP_X_REQUEST_ID")))
+ body = request.body
+
+ try:
+ slack_signature = request.META["HTTP_X_SLACK_SIGNATURE"]
+ slack_request_timestamp = request.META["HTTP_X_SLACK_REQUEST_TIMESTAMP"]
+ except KeyError:
+ logger.warning("X-Slack-Signature or X-Slack-Request_Timestamp don't exist, This request is not from slack")
+ return Response(status=403)
+
+ if not settings.DEBUG:
+ if not (
+ SlackEventApiEndpointView.verify_signature(
+ slack_request_timestamp, slack_signature, body, live_settings.SLACK_SIGNING_SECRET
+ )
+ or SlackEventApiEndpointView.verify_signature(
+ slack_request_timestamp, slack_signature, body, settings.SLACK_SIGNING_SECRET_LIVE
+ )
+ ):
+ return Response(status=403)
+
+ # Unifying payload
+ if "payload" in request.data:
+ payload = request.data["payload"]
+ else:
+ payload = request.data
+ if isinstance(payload, str):
+ payload = json.JSONDecoder().decode(payload)
+
+ # Check whether this is a repeated Slack request
+ if "HTTP_X_SLACK_RETRY_NUM" in request.META and int(request.META["HTTP_X_SLACK_RETRY_NUM"]) > 1:
+ logger.critical(
+ "Slack retries {} time, request data: {}".format(request.META["HTTP_X_SLACK_RETRY_NUM"], request.data)
+ )
+ payload["amixr_slack_retries"] = request.META["HTTP_X_SLACK_RETRY_NUM"]
+
+ # Initial url verification
+ if "type" in payload and payload["type"] == "url_verification":
+ logger.critical("URL verification from Slack side. That's suspicious.")
+ return Response(payload["challenge"])
+
+ # Linking team
+ slack_team_identity = self._get_slack_team_identity_from_payload(payload)
+
+ if not slack_team_identity:
+ logger.info("Dropping request because it does not have SlackTeamIdentity.")
+ return Response()
+
+ # Means that the slack_team_identity is unpopulated
+ if not slack_team_identity.organizations.exists():
+ logger.warning(f"OnCall Team for SlackTeamIdentity is not detected, stop it!")
+ # Open pop-up to inform user why OnCall bot doesn't work if any action was triggered
+ warning_text = (
+ "OnCall is not able to process this action because this Slack workspace was "
+ "disconnected from OnCall. Please log in to the OnCall web interface and install "
+ "Slack Integration with this workspace again."
+ )
+ self._open_warning_window_if_needed(payload, slack_team_identity, warning_text)
+ return Response(status=200)
+
+ # TODO: the case when a team has no keys is unexpected; investigation is required
+ if slack_team_identity.access_token is None and slack_team_identity.bot_access_token is None:
+ logger.info(f"Team {slack_team_identity.slack_id} has no keys, dropping request.")
+ return Response()
+
+ sc = SlackClientWithErrorHandling(slack_team_identity.bot_access_token)
+
+ if slack_team_identity.detected_token_revoked is not None:
+ # check if token is still invalid
+ try:
+ sc.api_call(
+ "auth.test",
+ team=slack_team_identity,
+ )
+ except SlackAPITokenException:
+ logger.info(f"Team {slack_team_identity.slack_id} has revoked token, dropping request.")
+ return Response(status=200)
+
+ Step = None
+ step_was_found = False
+
+ slack_user_id = None
+ user = None
+ # Linking user identity
+ slack_user_identity = None
+
+ if "event" in payload and payload["event"] is not None:
+ if ("user" in payload["event"]) and slack_team_identity and (payload["event"]["user"] is not None):
+ if "id" in payload["event"]["user"]:
+ slack_user_id = payload["event"]["user"]["id"]
+ elif type(payload["event"]["user"]) is str:
+ slack_user_id = payload["event"]["user"]
+ else:
+ raise Exception("Failed Linking user identity")
+
+ elif (
+ ("bot_id" in payload["event"])
+ and slack_team_identity
+ and (
+ payload["event"]["bot_id"] is not None
+ and "channel_type" in payload["event"]
+ and payload["event"]["channel_type"] == EVENT_TYPE_MESSAGE_CHANNEL
+ )
+ ):
+ response = sc.api_call("bots.info", bot=payload["event"]["bot_id"])
+ bot_user_id = response.get("bot", {}).get("user_id", "")
+
+ # Don't react to our own bot's messages.
+ if bot_user_id == slack_team_identity.bot_user_id:
+ return Response(status=200)
+
+ elif "user" in payload["event"].get("message", {}):
+ slack_user_id = payload["event"]["message"]["user"]
+ # event subtype 'message_deleted'
+ elif "user" in payload["event"].get("previous_message", {}):
+ slack_user_id = payload["event"]["previous_message"]["user"]
+
+ if "user" in payload:
+ slack_user_id = payload["user"]["id"]
+
+ elif "user_id" in payload:
+ slack_user_id = payload["user_id"]
+
+ if slack_user_id is not None and slack_user_id != slack_team_identity.bot_user_id:
+ slack_user_identity = SlackUserIdentity.objects.filter(
+ slack_id=slack_user_id,
+ slack_team_identity=slack_team_identity,
+ ).first()
+
+ organization = self._get_organization_from_payload(payload, slack_team_identity)
+ logger.info("Organization: " + str(organization))
+ logger.info("SlackUserIdentity detected: " + str(slack_user_identity))
+
+ if not slack_user_identity:
+ if "type" in payload and payload["type"] == PAYLOAD_TYPE_EVENT_CALLBACK:
+ if payload["event"]["type"] in [
+ EVENT_TYPE_SUBTEAM_CREATED,
+ EVENT_TYPE_SUBTEAM_UPDATED,
+ EVENT_TYPE_SUBTEAM_MEMBERS_CHANGED,
+ ]:
+ logger.info("Slack event without user slack_id.")
+ elif payload["event"]["type"] == EVENT_TYPE_USER_CHANGE:
+ logger.info("Event user_change. Dropping request because it does not have SlackUserIdentity.")
+ return Response()
+ else:
+ logger.info("Dropping request because it does not have SlackUserIdentity.")
+ self._open_warning_for_unconnected_user(sc, payload)
+ return Response()
+ elif organization:
+ user = slack_user_identity.get_user(organization)
+ if not user:
+ # Means that this slack_user_identity is not a member of any organization connected to this Slack workspace
+ warning_text = "Permission denied. Please connect your Slack account to OnCall."
+ # Open pop-up to inform user why OnCall bot doesn't work if any action was triggered
+ self._open_warning_window_if_needed(payload, slack_team_identity, warning_text)
+ return Response(status=200)
+
+ action_record = SlackActionRecord(user=user, organization=organization, payload=payload)
+
+ # Capture cases where we expect a stateful message from the user
+ if not step_was_found and "type" in payload and payload["type"] == PAYLOAD_TYPE_EVENT_CALLBACK:
+ # Message event is from channel
+ if (
+ payload["event"]["type"] == EVENT_TYPE_MESSAGE
+ and payload["event"]["channel_type"] == EVENT_TYPE_MESSAGE_CHANNEL
+ and (
+ "subtype" not in payload["event"]
+ or payload["event"]["subtype"] == EVENT_SUBTYPE_BOT_MESSAGE
+ or payload["event"]["subtype"] == EVENT_SUBTYPE_MESSAGE_CHANGED
+ or payload["event"]["subtype"] == EVENT_SUBTYPE_FILE_SHARE
+ or payload["event"]["subtype"] == EVENT_SUBTYPE_MESSAGE_DELETED
+ )
+ ):
+ print("Inside channel.messages event")
+ for route in SCENARIOS_ROUTES:
+ if (
+ "message_channel_type" in route
+ and payload["event"]["channel_type"] == route["message_channel_type"]
+ ):
+ Step = route["step"]
+ logger.info("Routing to {}".format(Step))
+ step = Step(slack_team_identity, organization, user)
+ step.dispatch(slack_user_identity, slack_team_identity, payload)
+ step_was_found = True
+ # We don't do anything on app mention, but we don't want to unsubscribe from this event yet.
+ if payload["event"]["type"] == EVENT_TYPE_APP_MENTION:
+ logger.info(f"Received event of type {EVENT_TYPE_APP_MENTION} from slack. Skipping.")
+ return Response(status=200)
+ # Routing to Steps based on routing rules
+ try:
+ if not step_was_found:
+ for route in SCENARIOS_ROUTES:
+ # Slash commands have no "type"
+ if "command" in payload and route["payload_type"] == PAYLOAD_TYPE_SLASH_COMMAND:
+ if payload["command"] in route["command_name"]:
+ Step = route["step"]
+ action_record.step = Step.routing_uid()
+ logger.info("Routing to {}".format(Step))
+ step = Step(slack_team_identity, organization, user)
+ step.dispatch(slack_user_identity, slack_team_identity, payload)
+ step_was_found = True
+
+ if "type" in payload and payload["type"] == route["payload_type"]:
+ if payload["type"] == PAYLOAD_TYPE_EVENT_CALLBACK:
+ if payload["event"]["type"] == route["event_type"]:
+ # event_name is used for stateful steps
+ if "event_name" not in route:
+ Step = route["step"]
+ action_record.step = Step.routing_uid()
+ logger.info("Routing to {}".format(Step))
+ step = Step(slack_team_identity, organization, user)
+ step.dispatch(slack_user_identity, slack_team_identity, payload)
+ step_was_found = True
+
+ if payload["type"] == PAYLOAD_TYPE_INTERACTIVE_MESSAGE:
+ for action in payload["actions"]:
+ if action["type"] == route["action_type"]:
+ # Action name may also contain action arguments.
+ # So only beginning is used for routing.
+ if action["name"].startswith(route["action_name"]):
+ Step = route["step"]
+ action_record.step = Step.routing_uid()
+ logger.info("Routing to {}".format(Step))
+ step = Step(slack_team_identity, organization, user)
+ result = step.dispatch(slack_user_identity, slack_team_identity, payload)
+ if result is not None:
+ return result
+ step_was_found = True
+
+ if payload["type"] == PAYLOAD_TYPE_BLOCK_ACTIONS:
+ for action in payload["actions"]:
+ if action["type"] == route["block_action_type"]:
+ if action["action_id"].startswith(route["block_action_id"]):
+ Step = route["step"]
+ action_record.step = Step.routing_uid()
+ logger.info("Routing to {}".format(Step))
+ step = Step(slack_team_identity, organization, user)
+ step.dispatch(slack_user_identity, slack_team_identity, payload)
+ step_was_found = True
+
+ if payload["type"] == PAYLOAD_TYPE_DIALOG_SUBMISSION:
+ if payload["callback_id"] == route["dialog_callback_id"]:
+ Step = route["step"]
+ action_record.step = Step.routing_uid()
+ logger.info("Routing to {}".format(Step))
+ step = Step(slack_team_identity, organization, user)
+ result = step.dispatch(slack_user_identity, slack_team_identity, payload)
+ if result is not None:
+ return result
+ step_was_found = True
+
+ if payload["type"] == PAYLOAD_TYPE_VIEW_SUBMISSION:
+ if payload["view"]["callback_id"].startswith(route["view_callback_id"]):
+ Step = route["step"]
+ action_record.step = Step.routing_uid()
+ logger.info("Routing to {}".format(Step))
+ step = Step(slack_team_identity, organization, user)
+ result = step.dispatch(slack_user_identity, slack_team_identity, payload)
+ if result is not None:
+ return result
+ step_was_found = True
+
+ if payload["type"] == PAYLOAD_TYPE_MESSAGE_ACTION:
+ if payload["callback_id"] in route["message_action_callback_id"]:
+ Step = route["step"]
+ action_record.step = Step.routing_uid()
+ logger.info("Routing to {}".format(Step))
+ step = Step(slack_team_identity, organization, user)
+ step.dispatch(slack_user_identity, slack_team_identity, payload)
+ step_was_found = True
+
+ finally:
+ if Step is not None and Step.need_to_be_logged and organization:
+ action_record.save()
+
+ if not step_was_found:
+ raise Exception("Step is undefined" + str(payload))
+
+ return Response(status=200)
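+
+ # A sketch of the assumed shape of a SCENARIOS_ROUTES entry, inferred from the
+ # matching logic above (the step class named here is hypothetical):
+ #
+ # {
+ #     "payload_type": PAYLOAD_TYPE_SLASH_COMMAND,
+ #     "command_name": ["/oncall"],
+ #     "step": SomeScenarioStep,
+ # }
+ #
+ # Block-actions routes instead carry "block_action_type" and "block_action_id",
+ # matched by prefix against action["action_id"].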
+
+ def _get_slack_team_identity_from_payload(self, payload) -> Optional[SlackTeamIdentity]:
+ slack_team_identity = None
+
+ if "team" in payload:
+ slack_team_id = payload["team"]["id"]
+ elif "team_id" in payload:
+ slack_team_id = payload["team_id"]
+ else:
+ return slack_team_identity
+
+ try:
+ slack_team_identity = SlackTeamIdentity.objects.get(slack_id=slack_team_id)
+ except SlackTeamIdentity.DoesNotExist as e:
+ logger.warning("Team identity not detected, that's dangerous!" + str(e))
+ return slack_team_identity
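+
+ # For reference, the two payload shapes handled above (ids are illustrative):
+ # interactive payloads carry {"team": {"id": "T12345678", ...}}, while event
+ # callbacks carry a top-level {"team_id": "T12345678"}.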
+
+ def _get_organization_from_payload(self, payload, slack_team_identity):
+ message_ts = None
+ channel_id = None
+ organization = None
+
+ # view submission or actions in view
+ if "view" in payload:
+ organization_id = None
+ private_metadata = payload["view"].get("private_metadata")
+ # steps that store organization_id in private_metadata before the view is opened
+ if private_metadata and "organization_id" in private_metadata:
+ organization_id = json.loads(private_metadata).get("organization_id")
+ # steps with organization selection in view (e.g. slash commands)
+ elif ScenarioStep.SELECT_ORGANIZATION_AND_ROUTE_BLOCK_ID in payload["view"].get("state", {}).get(
+ "values", {}
+ ):
+ payload_values = payload["view"]["state"]["values"]
+ selected_value = payload_values[ScenarioStep.SELECT_ORGANIZATION_AND_ROUTE_BLOCK_ID][
+ ScenarioStep.SELECT_ORGANIZATION_AND_ROUTE_BLOCK_ID
+ ]["selected_option"]["value"]
+ organization_id = int(selected_value.split("-")[0])
+ if organization_id:
+ organization = slack_team_identity.organizations.get(pk=organization_id)
+ return organization
+ # buttons and actions
+ elif payload.get("type") in [
+ PAYLOAD_TYPE_BLOCK_ACTIONS,
+ PAYLOAD_TYPE_INTERACTIVE_MESSAGE,
+ PAYLOAD_TYPE_MESSAGE_ACTION,
+ ]:
+ # for cases when we put organization_id into action value (e.g. public suggestion)
+ if (
+ payload.get("actions")
+ and payload["actions"][0].get("value", {})
+ and "organization_id" in payload["actions"][0]["value"]
+ ):
+ organization_id = int(json.loads(payload["actions"][0]["value"])["organization_id"])
+ organization = slack_team_identity.organizations.get(pk=organization_id)
+ return organization
+
+ channel_id = payload["channel"]["id"]
+ if "message" in payload:
+ message_ts = payload["message"].get("thread_ts") or payload["message"]["ts"]
+ # for interactive message
+ elif "message_ts" in payload:
+ message_ts = payload["message_ts"]
+ else:
+ return
+ # events
+ elif payload.get("type") == PAYLOAD_TYPE_EVENT_CALLBACK:
+ if "channel" in payload["event"]: # events without channel: user_change, events with subteam, etc.
+ channel_id = payload["event"]["channel"]
+
+ if "message" in payload["event"]:
+ message_ts = payload["event"]["message"].get("thread_ts") or payload["event"]["message"]["ts"]
+ elif "thread_ts" in payload["event"]:
+ message_ts = payload["event"]["thread_ts"]
+ else:
+ return
+
+ if not (message_ts and channel_id):
+ return
+
+ try:
+ slack_message = SlackMessage.objects.get(
+ slack_id=message_ts,
+ _slack_team_identity=slack_team_identity,
+ channel_id=channel_id,
+ )
+ except SlackMessage.DoesNotExist:
+ pass
+ else:
+ alert_group = slack_message.get_alert_group()
+ if alert_group:
+ organization = alert_group.channel.organization
+ return organization
+ return organization
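+
+ # Illustration of the private_metadata handled above (contents are made up):
+ # payload["view"]["private_metadata"] == '{"organization_id": 42}'
+ # The substring check finds "organization_id", and json.loads() yields the pk.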
+
+ def _open_warning_window_if_needed(self, payload, slack_team_identity, warning_text) -> None:
+ if payload.get("trigger_id") is not None:
+ step = ScenarioStep(slack_team_identity)
+ try:
+ step.open_warning_window(payload, warning_text)
+ except SlackAPIException as e:
+ logger.info(
+ f"Failed to open pop-up for unpopulated SlackTeamIdentity {slack_team_identity.pk}\n" f"Error: {e}"
+ )
+
+ def _open_warning_for_unconnected_user(self, slack_client, payload):
+ if payload.get("trigger_id") is None:
+ return
+
+ text = (
+ "Your Grafana account is not connected to your Slack account. :flushed:\n"
+ "That's very easy to fix. Please go to the *Grafana* -> *OnCall* -> *Users*, "
+ "choose *your profile* and click the *connect* button.\n"
+ ":rocket: :rocket: :rocket:"
+ )
+
+ view = {
+ "blocks": (
+ {"type": "section", "block_id": "section-identifier", "text": {"type": "mrkdwn", "text": text}},
+ ),
+ "type": "modal",
+ "callback_id": "modal-identifier",
+ "title": {
+ "type": "plain_text",
+ "text": "One more step!",
+ },
+ }
+ slack_client.api_call(
+ "views.open",
+ trigger_id=payload["trigger_id"],
+ view=view,
+ )
+
+
+class ResetSlackView(APIView):
+
+ permission_classes = (IsAuthenticated, MethodPermission)
+ authentication_classes = [PluginAuthentication]
+
+ method_permissions = {IsAdmin: {"POST"}}
+
+ def post(self, request):
+ organization = request.auth.organization
+ slack_team_identity = organization.slack_team_identity
+ if slack_team_identity is not None:
+ clean_slack_integration_leftovers.apply_async((organization.pk,))
+ description = f"Slack workspace {slack_team_identity.cached_name} was disconnected from organization"
+ create_organization_log(
+ organization, request.user, OrganizationLogType.TYPE_SLACK_WORKSPACE_DISCONNECTED, description
+ )
+ unpopulate_slack_user_identities(organization.pk, True)
+ response = Response(status=200)
+ else:
+ response = Response(status=400)
+
+ return response
diff --git a/engine/apps/social_auth/__init__.py b/engine/apps/social_auth/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/engine/apps/social_auth/backends.py b/engine/apps/social_auth/backends.py
new file mode 100644
index 0000000000..753e814a78
--- /dev/null
+++ b/engine/apps/social_auth/backends.py
@@ -0,0 +1,164 @@
+from urllib.parse import urljoin
+
+from social_core.backends.slack import SlackOAuth2
+from social_core.utils import handle_http_errors
+
+from apps.auth_token.constants import SLACK_AUTH_TOKEN_NAME
+from apps.auth_token.models import SlackAuthToken
+
+ # Scopes for the Slack user token.
+ # Its main purpose is retrieving user data in SlackOAuth2V2, but it is also used in legacy code and unusual Slack API cases.
+USER_SCOPE = ["channels:read", "identify", "chat:write", "users.profile:read", "users:read", "users:read.email"]
+
+ # Scopes for the Slack bot token.
+ # This is the primary token used for most requests to the Slack API.
+ # Changing these scopes requires confirmation in the Slack app settings.
+BOT_SCOPE = [
+ "app_mentions:read",
+ "channels:history",
+ "channels:join",
+ "channels:read",
+ "chat:write",
+ "chat:write.customize",
+ "chat:write.public",
+ "commands",
+ "files:write",
+ "groups:history",
+ "groups:read",
+ "im:history",
+ "im:read",
+ "im:write",
+ "mpim:history",
+ "reactions:write",
+ "team:read",
+ "usergroups:read",
+ "usergroups:write",
+ "users.profile:read",
+ "users:read",
+ "users:read.email",
+ "users:write",
+]
+
+# Reference to Slack tokens: https://api.slack.com/authentication/token-types
+
+
+class SlackOAuth2V2(SlackOAuth2):
+ """
+ Slack apps with granular permissions require Slack OAuth 2.0 V2.
+ SlackOAuth2V2 and its subclasses adapt the SlackOAuth2 implementation from social_core to the new
+ endpoints and response shapes.
+ Read more: https://api.slack.com/authentication/oauth-v2
+ """
+
+ AUTHORIZATION_URL = "https://slack.com/oauth/v2/authorize"
+ ACCESS_TOKEN_URL = "https://slack.com/api/oauth.v2.access"
+ AUTH_TOKEN_NAME = SLACK_AUTH_TOKEN_NAME
+
+ # Remove redirect state because we lose the session during redirects
+ REDIRECT_STATE = False
+ STATE_PARAMETER = False
+
+ EXTRA_DATA = [("id", "id"), ("name", "name"), ("real_name", "real_name"), ("team", "team")]
+
+ @handle_http_errors
+ def auth_complete(self, *args, **kwargs):
+ """Completes login process, must return user instance"""
+ self.process_error(self.data)
+ state = self.validate_state()
+ # add auth token to redirect uri, because it must be the same in all slack auth requests
+ token_string = self.data.get(self.AUTH_TOKEN_NAME)
+ if token_string:
+ self._update_redirect_uri_with_auth_token(token_string)
+ data, params = None, None
+ if self.ACCESS_TOKEN_METHOD == "GET":
+ params = self.auth_complete_params(state)
+ else:
+ data = self.auth_complete_params(state)
+
+ response = self.request_access_token(
+ self.access_token_url(),
+ data=data,
+ params=params,
+ headers=self.auth_headers(),
+ auth=self.auth_complete_credentials(),
+ method=self.ACCESS_TOKEN_METHOD,
+ )
+ self.process_error(response)
+ access_token = response["authed_user"]["access_token"]
+ return self.do_auth(access_token, response=response, *args, **kwargs)
+
+ @handle_http_errors
+ def do_auth(self, access_token, *args, **kwargs):
+ """Finish the auth process once the access_token was retrieved"""
+ data = self.user_data(access_token, *args, **kwargs)
+ data.pop("team", None) # we don't want to override team from token by team from user_data request
+ response = kwargs.get("response") or {}
+ response.update(data or {})
+ if "access_token" not in response:
+ response["access_token"] = access_token
+ kwargs.update({"response": response, "backend": self})
+ return self.strategy.authenticate(*args, **kwargs)
+
+ def get_scope_argument(self):
+ param = {}
+ scopes = self.get_scope()
+ for k, v in scopes.items():
+ param[k] = self.SCOPE_SEPARATOR.join(v)
+ return param
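+
+ # Illustration (scopes are examples): get_scope() returning
+ # {"user_scope": ["identify", "users:read"]} is joined with the backend's
+ # SCOPE_SEPARATOR into a single query parameter, e.g.
+ # {"user_scope": "identify,users:read"} for a comma separator.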
+
+ def user_data(self, access_token, *args, **kwargs):
+ """
+ Override the original method to load user data via users.profile.get (requires the users.profile:read scope)
+ """
+ r = self.get_json("https://slack.com/api/users.profile.get", params={"token": access_token})
+ if r["ok"] is False:
+ r = self.get_json(
+ "https://slack.com/api/users.profile.get",
+ headers={"Authorization": f"Bearer {access_token}"},
+ )
+ r = r["profile"]
+ # Emulate the shape of the original method's return value to avoid breaking anything inside social_core
+ response = {}
+ response["user"] = {}
+ response["user"]["name"] = r["real_name_normalized"]
+ response["user"]["email"] = r["email"]
+ response["team"] = r.get("team", None)
+ return response
+
+ def start(self):
+ """Add slack auth token to redirect uri and continue authentication"""
+ token_string = self._generate_auth_token_string()
+ self._update_redirect_uri_with_auth_token(token_string)
+ return super().start()
+
+ def _generate_auth_token_string(self) -> str:
+ _, token_string = SlackAuthToken.create_auth_token(
+ self.strategy.request.user, self.strategy.request.auth.organization
+ )
+ return token_string
+
+ def _update_redirect_uri_with_auth_token(self, token_string: str) -> None:
+ auth_token_param = f"?{self.AUTH_TOKEN_NAME}={token_string}"
+ self.redirect_uri = urljoin(self.redirect_uri, auth_token_param)
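+
+ # Illustration (token name and host are hypothetical): with redirect_uri
+ # "https://example.com/complete/slack-login/" and AUTH_TOKEN_NAME "slack_login_token",
+ # the result is "https://example.com/complete/slack-login/?slack_login_token=<token>".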
+
+
+class LoginSlackOAuth2V2(SlackOAuth2V2):
+ name = "slack-login"
+ SCOPE_PARAMETER_NAME = "user_scope"
+
+ EXTRA_DATA = [
+ ("id", "id"),
+ ("name", "name"),
+ ("real_name", "real_name"),
+ ("team", "team"),
+ ]
+
+ def get_scope(self):
+ return {"user_scope": USER_SCOPE}
+
+
+class InstallSlackOAuth2V2(SlackOAuth2V2):
+ name = "slack-install-free"
+
+ def get_scope(self):
+ return {"user_scope": USER_SCOPE, "scope": BOT_SCOPE}
diff --git a/engine/apps/social_auth/live_setting_django_strategy.py b/engine/apps/social_auth/live_setting_django_strategy.py
new file mode 100644
index 0000000000..dd913e670c
--- /dev/null
+++ b/engine/apps/social_auth/live_setting_django_strategy.py
@@ -0,0 +1,44 @@
+import logging
+
+from django.conf import settings
+from django.shortcuts import resolve_url
+from django.utils.encoding import force_text
+from django.utils.functional import Promise
+from social_django.strategy import DjangoStrategy
+
+from apps.base.utils import live_settings
+
+logger = logging.getLogger(__name__)
+
+
+class LiveSettingDjangoStrategy(DjangoStrategy):
+ """
+ This strategy is used for social auth.
+
+ It allows giving aliases to the original social auth settings and reading them from live settings by those aliases.
+ It was originally introduced for on-prem installations to make the names of social auth settings more obvious for users.
+ """
+
+ def get_setting(self, name):
+ name_to_live_setting_map = settings.SOCIAL_AUTH_SETTING_NAME_TO_LIVE_SETTING_NAME
+ if name in name_to_live_setting_map:
+ value = getattr(live_settings, name_to_live_setting_map[name])
+ else:
+ value = getattr(settings, name)
+ # Force text on URL named settings that are instance of Promise
+ if name.endswith("_URL"):
+ if isinstance(value, Promise):
+ value = force_text(value)
+ value = resolve_url(value)
+ return value
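+
+ # A sketch of the alias mechanism (setting names are hypothetical): with
+ # SOCIAL_AUTH_SETTING_NAME_TO_LIVE_SETTING_NAME == {"SOCIAL_AUTH_SLACK_KEY": "SLACK_CLIENT_OAUTH_ID"},
+ # get_setting("SOCIAL_AUTH_SLACK_KEY") reads live_settings.SLACK_CLIENT_OAUTH_ID
+ # instead of the Django setting of the same name.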
+
+ def build_absolute_uri(self, path=None):
+ """
+ Overridden DjangoStrategy method to substitute and force the host value from the environment
+ """
+ if settings.SLACK_INSTALL_RETURN_REDIRECT_HOST is not None and path is not None:
+ return settings.SLACK_INSTALL_RETURN_REDIRECT_HOST + path
+ if self.request:
+ return self.request.build_absolute_uri(path)
+ else:
+ return path
diff --git a/engine/apps/social_auth/middlewares.py b/engine/apps/social_auth/middlewares.py
new file mode 100644
index 0000000000..b4a85b1fb4
--- /dev/null
+++ b/engine/apps/social_auth/middlewares.py
@@ -0,0 +1,28 @@
+from urllib.parse import urljoin
+
+from django.http import HttpResponse
+from django.shortcuts import redirect
+from rest_framework import status
+from social_core import exceptions
+from social_django.middleware import SocialAuthExceptionMiddleware
+
+from common.constants.slack_auth import REDIRECT_AFTER_SLACK_INSTALL, SLACK_AUTH_FAILED
+
+
+class SocialAuthAuthCanceledExceptionMiddleware(SocialAuthExceptionMiddleware):
+ def process_exception(self, request, exception):
+ if isinstance(exception, exceptions.AuthCanceled):
+ # if the user canceled authentication, redirect them to the previous page using the same link
+ # as we use to redirect after auth/install
+ url_to_redirect = urljoin(request.user.organization.grafana_url, "/a/grafana-oncall-app/?page=chat-ops")
+ return redirect(url_to_redirect)
+ elif isinstance(exception, exceptions.AuthFailed):
+ # if authentication failed, redirect the user to the plugin page using the same link
+ # as we use to redirect after auth/install, with an error flag
+ url_to_redirect = urljoin(
+ request.user.organization.grafana_url,
+ f"/a/grafana-oncall-app/?page=chat-ops&slack_error={SLACK_AUTH_FAILED}",
+ )
+ return redirect(url_to_redirect)
+ elif isinstance(exception, KeyError) and REDIRECT_AFTER_SLACK_INSTALL in exception.args:
+ return HttpResponse(status=status.HTTP_401_UNAUTHORIZED)
diff --git a/engine/apps/social_auth/pipeline.py b/engine/apps/social_auth/pipeline.py
new file mode 100644
index 0000000000..436799a19c
--- /dev/null
+++ b/engine/apps/social_auth/pipeline.py
@@ -0,0 +1,102 @@
+import logging
+from urllib.parse import urljoin
+
+from django.apps import apps
+from django.http import HttpResponse
+from rest_framework import status
+from social_core.exceptions import AuthForbidden
+
+from apps.slack.tasks import populate_slack_channels_for_team, populate_slack_usergroups_for_team
+from common.constants.slack_auth import (
+ REDIRECT_AFTER_SLACK_INSTALL,
+ SLACK_AUTH_SLACK_USER_ALREADY_CONNECTED_ERROR,
+ SLACK_AUTH_WRONG_WORKSPACE_ERROR,
+)
+
+logger = logging.getLogger(__name__)
+
+
+def set_user_and_organization_from_request(backend, strategy, *args, **kwargs):
+ user = strategy.request.user
+ organization = strategy.request.auth.organization
+ if user is None or organization is None:
+ return HttpResponse(str(AuthForbidden(backend)), status=status.HTTP_401_UNAUTHORIZED)
+ return {
+ "user": user,
+ "organization": organization,
+ }
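+
+ # The dict returned above follows the python-social-auth pipeline convention:
+ # keys returned by one pipeline function are merged into the kwargs passed to
+ # subsequent functions, which is how connect_user_to_slack below receives
+ # `user` and `organization`.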
+
+
+def connect_user_to_slack(response, backend, strategy, user, organization, *args, **kwargs):
+ SlackUserIdentity = apps.get_model("slack", "SlackUserIdentity")
+
+ # Continue this pipeline step only if it is the login flow
+ if backend.name != "slack-login":
+ return
+
+ slack_team_identity = organization.slack_team_identity
+ slack_user_id = response["authed_user"]["id"]
+
+ if slack_team_identity is None:
+ # means that organization doesn't have slack integration, so user cannot connect their account to slack
+ return HttpResponse(status=status.HTTP_400_BAD_REQUEST)
+ if slack_team_identity.slack_id != response["team"]["id"]:
+ # means the user authenticated in a Slack workspace that is not connected to their organization;
+ # change the redirect url to show the user an error message, and save it in a session param
+ url = urljoin(
+ strategy.session[REDIRECT_AFTER_SLACK_INSTALL],
+ f"?page=users&slack_error={SLACK_AUTH_WRONG_WORKSPACE_ERROR}",
+ )
+ strategy.session[REDIRECT_AFTER_SLACK_INSTALL] = url
+ return HttpResponse(status=status.HTTP_400_BAD_REQUEST)
+
+ if organization.users.filter(slack_user_identity__slack_id=slack_user_id).exists():
+ # means this Slack user is already connected to a different user in the current organization
+ url = urljoin(
+ strategy.session[REDIRECT_AFTER_SLACK_INSTALL],
+ f"?page=users&slack_error={SLACK_AUTH_SLACK_USER_ALREADY_CONNECTED_ERROR}",
+ )
+ strategy.session[REDIRECT_AFTER_SLACK_INSTALL] = url
+ return HttpResponse(status=status.HTTP_400_BAD_REQUEST)
+
+ slack_user_identity, _ = SlackUserIdentity.objects.get_or_create(
+ slack_id=slack_user_id,
+ slack_team_identity=slack_team_identity,
+ defaults={
+ "cached_slack_email": response["user"]["email"],
+ },
+ )
+ user.slack_user_identity = slack_user_identity
+ user.save(update_fields=["slack_user_identity"])
+
+ slack_user_identity.update_profile_info()
+
+
+def populate_slack_identities(response, backend, user, organization, **kwargs):
+ SlackTeamIdentity = apps.get_model("slack", "SlackTeamIdentity")
+
+ # Continue this pipeline step only if it is the installation flow
+ if backend.name != "slack-install-free":
+ return
+
+ if organization.slack_team_identity is not None:
+ # means that organization already has slack integration
+ return HttpResponse(status=status.HTTP_400_BAD_REQUEST)
+
+ slack_team_id = response["team"]["id"]
+ slack_team_identity, is_slack_team_identity_created = SlackTeamIdentity.objects.get_or_create(
+ slack_id=slack_team_id,
+ )
+
+ # update slack oauth fields by data from response
+ slack_team_identity.update_oauth_fields(user, organization, response)
+
+ populate_slack_channels_for_team.apply_async((slack_team_identity.pk,))
+ user.slack_user_identity.update_profile_info()
+ # todo slack: do we need to update info for all existing slack users in the slack team?
+ # populate_slack_user_identities.apply_async((organization.pk,))
+ populate_slack_usergroups_for_team.apply_async((slack_team_identity.pk,), countdown=10)
+
+
+def delete_slack_auth_token(strategy, *args, **kwargs):
+ strategy.request.auth.delete()
diff --git a/engine/apps/social_auth/urls.py b/engine/apps/social_auth/urls.py
new file mode 100644
index 0000000000..ed65946b73
--- /dev/null
+++ b/engine/apps/social_auth/urls.py
@@ -0,0 +1,11 @@
+from django.urls import path
+
+from .views import overridden_complete_slack_auth, overridden_login_slack_auth
+
+app_name = "social_auth"
+
+urlpatterns = [
+ path(r"login/", overridden_login_slack_auth, name="slack-auth-with-no-slash"),
+ path(r"login//", overridden_login_slack_auth, name="slack-auth"),
+ path(r"complete//", overridden_complete_slack_auth, name="complete-slack-auth"),
+]
diff --git a/engine/apps/social_auth/views.py b/engine/apps/social_auth/views.py
new file mode 100644
index 0000000000..208898967b
--- /dev/null
+++ b/engine/apps/social_auth/views.py
@@ -0,0 +1,50 @@
+import logging
+from urllib.parse import urljoin
+
+from django.contrib.auth import REDIRECT_FIELD_NAME
+from django.http import HttpResponseRedirect
+from django.views.decorators.cache import never_cache
+from django.views.decorators.csrf import csrf_exempt
+from rest_framework.decorators import api_view, authentication_classes
+from rest_framework.response import Response
+from social_core.actions import do_auth, do_complete
+from social_django.utils import psa
+from social_django.views import _do_login
+
+from apps.auth_token.auth import PluginAuthentication, SlackTokenAuthentication
+
+logger = logging.getLogger(__name__)
+
+
+@api_view(["GET"])
+@authentication_classes([PluginAuthentication])
+@never_cache
+@psa("social:complete")
+def overridden_login_slack_auth(request, backend):
+ # We can't just redirect the frontend here: this view is called via an API request from JS
+ # (which passes auth tokens), so the frontend can't follow our redirect.
+ # Instead, wrap and return the URL to redirect to as a string.
+ url_to_redirect_to = do_auth(request.backend, redirect_name=REDIRECT_FIELD_NAME).url
+
+ return Response(url_to_redirect_to, 200)
+
+
+@api_view(["GET"])
+@authentication_classes([SlackTokenAuthentication])
+@never_cache
+@csrf_exempt
+@psa("social:complete")
+def overridden_complete_slack_auth(request, backend, *args, **kwargs):
+ """Authentication complete view"""
+ do_complete(
+ request.backend,
+ _do_login,
+ user=request.user,
+ redirect_name=REDIRECT_FIELD_NAME,
+ request=request,
+ *args,
+ **kwargs,
+ )
+ # We build the frontend url using org url since multiple stacks could be connected to one backend.
+ return_to = urljoin(request.user.organization.grafana_url, "/a/grafana-oncall-app/?page=chat-ops")
+ return HttpResponseRedirect(return_to)
diff --git a/engine/apps/telegram/__init__.py b/engine/apps/telegram/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/engine/apps/telegram/alert_group_representative.py b/engine/apps/telegram/alert_group_representative.py
new file mode 100644
index 0000000000..355e3a2fb3
--- /dev/null
+++ b/engine/apps/telegram/alert_group_representative.py
@@ -0,0 +1,134 @@
+import logging
+
+from django.apps import apps
+
+from apps.alerts.models import AlertGroup
+from apps.alerts.representative import AlertGroupAbstractRepresentative
+from apps.telegram.models import TelegramMessage
+from apps.telegram.tasks import edit_message, on_create_alert_telegram_representative_async
+
+logger = logging.getLogger(__name__)
+logger.setLevel(logging.DEBUG)
+
+
+class AlertGroupTelegramRepresentative(AlertGroupAbstractRepresentative):
+ def __init__(self, log_record):
+ self.log_record = log_record
+
+ def is_applicable(self):
+ TelegramToUserConnector = apps.get_model("telegram", "TelegramToUserConnector")
+ TelegramToOrganizationConnector = apps.get_model("telegram", "TelegramToOrganizationConnector")
+
+ organization = self.log_record.alert_group.channel.organization
+
+ handler_exists = self.log_record.type in self.get_handlers_map().keys()
+
+ telegram_org_connector = TelegramToOrganizationConnector.objects.filter(organization=organization)
+ telegram_channel_configured = telegram_org_connector.exists() and telegram_org_connector[0].is_configured
+
+ is_user_in_org_using_telegram = TelegramToUserConnector.objects.filter(user__organization=organization).exists()
+
+ return handler_exists and (telegram_channel_configured or is_user_in_org_using_telegram)
+
+ @staticmethod
+ def get_handlers_map():
+ AlertGroupLogRecord = apps.get_model("alerts", "AlertGroupLogRecord")
+
+ return {
+ AlertGroupLogRecord.TYPE_ACK: "alert_group_action",
+ AlertGroupLogRecord.TYPE_UN_ACK: "alert_group_action",
+ AlertGroupLogRecord.TYPE_AUTO_UN_ACK: "alert_group_action",
+ AlertGroupLogRecord.TYPE_RESOLVED: "alert_group_action",
+ AlertGroupLogRecord.TYPE_UN_RESOLVED: "alert_group_action",
+ AlertGroupLogRecord.TYPE_ACK_REMINDER_TRIGGERED: "alert_group_action",
+ AlertGroupLogRecord.TYPE_SILENCE: "alert_group_action",
+ AlertGroupLogRecord.TYPE_UN_SILENCE: "alert_group_action",
+ AlertGroupLogRecord.TYPE_ATTACHED: "alert_group_action",
+ AlertGroupLogRecord.TYPE_UNATTACHED: "alert_group_action",
+ }
+
+ # Process all alert group actions (ack, resolve, etc.)
+ def on_alert_group_action(self):
+ messages_to_edit = self.log_record.alert_group.telegram_messages.filter(
+ message_type__in=(
+ TelegramMessage.ALERT_GROUP_MESSAGE,
+ TelegramMessage.ACTIONS_MESSAGE,
+ TelegramMessage.PERSONAL_MESSAGE,
+ )
+ )
+ for message in messages_to_edit:
+ edit_message.delay(message_pk=message.pk)
+
+ @classmethod
+ def on_alert_group_update_log_report(cls, **kwargs):
+ logger.info("AlertGroupTelegramRepresentative UPDATE LOG REPORT SIGNAL")
+ alert_group = kwargs["alert_group"]
+ if not isinstance(alert_group, AlertGroup):
+ alert_group = AlertGroup.all_objects.get(pk=alert_group)
+
+ # telegram notification is disabled for channel filter
+ if alert_group.notify_in_telegram_enabled is False:
+ logger.debug(f"Skipping alert group with id {alert_group.pk} since notify_in_telegram is disabled")
+ return
+
+ messages_to_edit = alert_group.telegram_messages.filter(
+ message_type__in=(
+ TelegramMessage.LOG_MESSAGE,
+ TelegramMessage.PERSONAL_MESSAGE,
+ )
+ )
+
+ for message in messages_to_edit:
+ edit_message.delay(message_pk=message.pk)
+
+ @classmethod
+ def on_alert_group_action_triggered(cls, **kwargs):
+ AlertGroupLogRecord = apps.get_model("alerts", "AlertGroupLogRecord")
+ log_record = kwargs["log_record"]
+ logger.info(f"AlertGroupTelegramRepresentative ACTION SIGNAL, log record {log_record}")
+
+ if not isinstance(log_record, AlertGroupLogRecord):
+ log_record = AlertGroupLogRecord.objects.get(pk=log_record)
+
+ # telegram notification is disabled for channel filter
+ if log_record.alert_group.notify_in_telegram_enabled is False:
+ logger.debug(
+ f"Skipping alert group with id {log_record.alert_group.pk} since notify_in_telegram is disabled"
+ )
+ return
+
+ instance = cls(log_record)
+ if instance.is_applicable():
+ handler = instance.get_handler()
+ handler()
+
+ @staticmethod
+ def on_create_alert(**kwargs):
+ Alert = apps.get_model("alerts", "Alert")
+
+ alert_pk = kwargs["alert"]
+ alert = Alert.objects.get(pk=alert_pk)
+
+ # telegram notification is disabled for channel filter
+ if alert.group.notify_in_telegram_enabled is False:
+ logger.debug(f"Skipping alert with id {alert.pk} since notify_in_telegram is disabled")
+ return
+
+ on_create_alert_telegram_representative_async.apply_async((alert_pk,))
+
+ def get_handler(self):
+ handler_name = self.get_handler_name()
+ logger.info(f"Using '{handler_name}' handler to process action signal")
+ if hasattr(self, handler_name):
+ handler = getattr(self, handler_name)
+ else:
+ handler = self.on_handler_not_found
+
+ return handler
+
+ def get_handler_name(self):
+ return self.HANDLER_PREFIX + self.get_handlers_map()[self.log_record.type]
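+
+ # Illustration, assuming HANDLER_PREFIX == "on_" (defined on the parent class,
+ # not shown here): a TYPE_ACK log record maps to "alert_group_action", so this
+ # resolves to the on_alert_group_action handler above.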
+
+ @classmethod
+ def on_handler_not_found(cls):
+ pass
diff --git a/engine/apps/telegram/apps.py b/engine/apps/telegram/apps.py
new file mode 100644
index 0000000000..49c8fd78a0
--- /dev/null
+++ b/engine/apps/telegram/apps.py
@@ -0,0 +1,8 @@
+from django.apps import AppConfig
+
+
+class TelegramConfig(AppConfig):
+ name = "apps.telegram"
+
+ def ready(self):
+ import apps.telegram.signals # noqa: F401
diff --git a/engine/apps/telegram/client.py b/engine/apps/telegram/client.py
new file mode 100644
index 0000000000..280e26c505
--- /dev/null
+++ b/engine/apps/telegram/client.py
@@ -0,0 +1,147 @@
+from typing import Optional, Tuple, Union
+
+from django.conf import settings
+from telegram import Bot, InlineKeyboardMarkup, Message, ParseMode
+from telegram.error import InvalidToken, Unauthorized
+from telegram.utils.request import Request
+
+from apps.alerts.models import AlertGroup
+from apps.base.utils import live_settings
+from apps.telegram.models import TelegramMessage
+from apps.telegram.renderers.keyboard import TelegramKeyboardRenderer
+from apps.telegram.renderers.message import TelegramMessageRenderer
+
+
+class TelegramClient:
+ ALLOWED_UPDATES = ("message", "callback_query")
+ PARSE_MODE = ParseMode.HTML
+
+ def __init__(self, token: Optional[str] = None):
+ self.token = token or live_settings.TELEGRAM_TOKEN
+
+ if self.token is None:
+ raise InvalidToken()
+
+ @property
+ def api_client(self) -> Bot:
+ return Bot(self.token, request=Request(read_timeout=15))
+
+ def is_chat_member(self, chat_id: Union[int, str]) -> bool:
+ try:
+ self.api_client.get_chat(chat_id=chat_id)
+ return True
+ except Unauthorized:
+ return False
+
+ def register_webhook(self, webhook_url: Optional[str] = None) -> None:
+ webhook_url = webhook_url or settings.TELEGRAM_WEBHOOK_URL
+
+ webhook_info = self.api_client.get_webhook_info()
+ if webhook_info.url == webhook_url:
+ return
+
+ self.api_client.set_webhook(webhook_url, allowed_updates=self.ALLOWED_UPDATES)
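+
+ # Usage sketch (URL is illustrative):
+ # TelegramClient().register_webhook("https://example.com/telegram/webhook/")
+ # Safe to call repeatedly: the early return above skips the set_webhook call
+ # when the webhook is already registered.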
+
+ def send_message(
+ self,
+ chat_id: Union[int, str],
+ message_type: int,
+ alert_group: AlertGroup,
+ reply_to_message_id: Optional[int] = None,
+ ) -> TelegramMessage:
+ text, keyboard = self._get_message_and_keyboard(message_type=message_type, alert_group=alert_group)
+
+ raw_message = self.send_raw_message(
+ chat_id=chat_id, text=text, keyboard=keyboard, reply_to_message_id=reply_to_message_id
+ )
+ message = TelegramMessage.create_from_message(
+ message=raw_message, alert_group=alert_group, message_type=message_type
+ )
+
+ return message
+
+ def send_raw_message(
+ self,
+ chat_id: Union[int, str],
+ text: str,
+ keyboard: Optional[InlineKeyboardMarkup] = None,
+ reply_to_message_id: Optional[int] = None,
+ ) -> Message:
+ message = self.api_client.send_message(
+ chat_id=chat_id,
+ text=text,
+ reply_markup=keyboard,
+ reply_to_message_id=reply_to_message_id,
+ parse_mode=self.PARSE_MODE,
+ disable_web_page_preview=False,
+ )
+ return message
+
+ def edit_message(self, message: TelegramMessage) -> TelegramMessage:
+ text, keyboard = self._get_message_and_keyboard(
+ message_type=message.message_type, alert_group=message.alert_group
+ )
+
+ self.edit_raw_message(chat_id=message.chat_id, message_id=message.message_id, text=text, keyboard=keyboard)
+ return message
+
+ def edit_raw_message(
+ self,
+ chat_id: Union[int, str],
+ message_id: Union[int, str],
+ text: str,
+ keyboard: Optional[InlineKeyboardMarkup] = None,
+ ) -> Message:
+ message = self.api_client.edit_message_text(
+ chat_id=chat_id,
+ message_id=message_id,
+ text=text,
+ reply_markup=keyboard,
+ parse_mode=self.PARSE_MODE,
+ disable_web_page_preview=False,
+ )
+ return message
+
+ @staticmethod
+ def _get_message_and_keyboard(
+ message_type: int, alert_group: AlertGroup
+ ) -> Tuple[str, Optional[InlineKeyboardMarkup]]:
+ message_renderer = TelegramMessageRenderer(alert_group=alert_group)
+ keyboard_renderer = TelegramKeyboardRenderer(alert_group=alert_group)
+
+ if message_type == TelegramMessage.ALERT_GROUP_MESSAGE:
+ text = message_renderer.render_alert_group_message()
+ keyboard = None
+ elif message_type == TelegramMessage.LOG_MESSAGE:
+ text = message_renderer.render_log_message()
+ keyboard = None
+ elif message_type == TelegramMessage.ACTIONS_MESSAGE:
+ text = message_renderer.render_actions_message()
+ keyboard = keyboard_renderer.render_actions_keyboard()
+ elif message_type == TelegramMessage.PERSONAL_MESSAGE:
+ text = message_renderer.render_personal_message()
+ keyboard = keyboard_renderer.render_actions_keyboard()
+ elif message_type == TelegramMessage.FORMATTING_ERROR:
+ text = message_renderer.render_formatting_error_message()
+ keyboard = None
+ elif message_type in (
+ TelegramMessage.LINK_TO_CHANNEL_MESSAGE,
+ TelegramMessage.LINK_TO_CHANNEL_MESSAGE_WITHOUT_TITLE,
+ ):
+ alert_group_message = alert_group.telegram_messages.filter(
+ chat_id__startswith="-",
+ message_type__in=[TelegramMessage.ALERT_GROUP_MESSAGE, TelegramMessage.FORMATTING_ERROR],
+ ).first()
+
+ if alert_group_message is None:
+ raise Exception("No alert group message found, probably it is not saved to database yet")
+
+ include_title = message_type == TelegramMessage.LINK_TO_CHANNEL_MESSAGE
+ link = alert_group_message.link
+
+ text = message_renderer.render_link_to_channel_message(include_title=include_title)
+ keyboard = keyboard_renderer.render_link_to_channel_keyboard(link=link)
+ else:
+ raise Exception(f"_get_message_and_keyboard with type {message_type} is not implemented")
+
+ return text, keyboard
diff --git a/engine/apps/telegram/decorators.py b/engine/apps/telegram/decorators.py
new file mode 100644
index 0000000000..a5560e4be0
--- /dev/null
+++ b/engine/apps/telegram/decorators.py
@@ -0,0 +1,86 @@
+import logging
+from functools import wraps
+
+from django.core.exceptions import ImproperlyConfigured
+from telegram import error
+
+from apps.telegram.client import TelegramClient
+
+logger = logging.getLogger(__name__)
+
+
+def handle_missing_token(f):
+ @wraps(f)
+ def decorated(*args, **kwargs):
+ try:
+ TelegramClient()
+ except (ImproperlyConfigured, error.InvalidToken) as e:
+ logger.warning(
+ "Tried to initialize a Telegram client, but TELEGRAM_TOKEN live setting is invalid or missing. "
+ f"Exception: {e}"
+ )
+ return
+ else:
+ return f(*args, **kwargs)
+
+ return decorated
+
+
+def ignore_bot_deleted(f):
+ @wraps(f)
+ def decorated(*args, **kwargs):
+ try:
+ return f(*args, **kwargs)
+ except error.Unauthorized:
+ logger.warning(f"Tried to send Telegram message, but user deleted the bot. args: {args}, kwargs: {kwargs}")
+
+ return decorated
+
+
+def ignore_message_unchanged(f):
+ @wraps(f)
+ def decorated(*args, **kwargs):
+ try:
+ return f(*args, **kwargs)
+ except error.BadRequest as e:
+ if "Message is not modified" in e.message:
+ logger.warning(
+ f"Tried to change Telegram message, but update is identical to original message. "
+ f"args: {args}, kwargs: {kwargs}"
+ )
+ else:
+ raise e
+
+ return decorated
+
+
+def ignore_message_to_edit_deleted(f):
+ @wraps(f)
+ def decorated(*args, **kwargs):
+ try:
+ return f(*args, **kwargs)
+ except error.BadRequest as e:
+ if "Message to edit not found" in e.message:
+ logger.warning(
+ f"Tried to edit Telegram message, but message was deleted. args: {args}, kwargs: {kwargs}"
+ )
+ else:
+ raise e
+
+ return decorated
+
+
+def ignore_reply_to_message_deleted(f):
+ @wraps(f)
+ def decorated(*args, **kwargs):
+ try:
+ return f(*args, **kwargs)
+ except error.BadRequest as e:
+ if "Replied message not found" in e.message:
+ logger.warning(
+ f"Tried to reply to Telegram message, but message was deleted. args: {args}, kwargs: {kwargs}"
+ )
+ else:
+ raise e
+
+ return decorated
diff --git a/engine/apps/telegram/migrations/0001_squashed_initial.py b/engine/apps/telegram/migrations/0001_squashed_initial.py
new file mode 100644
index 0000000000..196726e035
--- /dev/null
+++ b/engine/apps/telegram/migrations/0001_squashed_initial.py
@@ -0,0 +1,76 @@
+# Generated by Django 3.2.5 on 2022-05-31 14:46
+
+import apps.telegram.models.connectors.channel
+import django.core.validators
+from django.db import migrations, models
+import django.db.models.deletion
+import uuid
+
+
+class Migration(migrations.Migration):
+
+ initial = True
+
+ dependencies = [
+ ('user_management', '0001_squashed_initial'),
+ ('alerts', '0001_squashed_initial'),
+ ]
+
+ operations = [
+ migrations.CreateModel(
+ name='TelegramVerificationCode',
+ fields=[
+ ('uuid', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
+ ('datetime', models.DateTimeField(auto_now_add=True)),
+ ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='telegram_verification_code', to='user_management.user')),
+ ],
+ ),
+ migrations.CreateModel(
+ name='TelegramToOrganizationConnector',
+ fields=[
+ ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
+ ('public_primary_key', models.CharField(default=apps.telegram.models.connectors.channel.generate_public_primary_key_for_telegram_to_at_connector, max_length=20, unique=True, validators=[django.core.validators.MinLengthValidator(13)])),
+ ('is_default_channel', models.BooleanField(default=False, null=True)),
+ ('channel_chat_id', models.CharField(max_length=100, unique=True)),
+ ('channel_name', models.CharField(default=None, max_length=100, null=True)),
+ ('discussion_group_chat_id', models.CharField(max_length=100, unique=True)),
+ ('discussion_group_name', models.CharField(default=None, max_length=100, null=True)),
+ ('datetime', models.DateTimeField(auto_now_add=True)),
+ ('organization', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='telegram_channel', to='user_management.organization')),
+ ],
+ ),
+ migrations.CreateModel(
+ name='TelegramMessage',
+ fields=[
+ ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
+ ('message_id', models.IntegerField()),
+ ('chat_id', models.CharField(max_length=100)),
+ ('message_type', models.IntegerField(choices=[(0, 'Alert group message'), (1, 'Actions message'), (2, 'Log message'), (3, 'Alert can not be rendered'), (4, 'Alert group message with action buttons and incident log'), (5, 'Link to channel message'), (6, 'Link to channel message without title')])),
+ ('discussion_group_message_id', models.IntegerField(default=None, null=True)),
+ ('edit_task_id', models.CharField(default=None, max_length=100, null=True)),
+ ('alert_group', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='telegram_messages', to='alerts.alertgroup')),
+ ],
+ ),
+ migrations.CreateModel(
+ name='TelegramChannelVerificationCode',
+ fields=[
+ ('uuid', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
+ ('datetime', models.DateTimeField(auto_now_add=True)),
+ ('author', models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='user_management.user')),
+ ('organization', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='telegram_verification_code', to='user_management.organization')),
+ ],
+ ),
+ migrations.CreateModel(
+ name='TelegramToUserConnector',
+ fields=[
+ ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
+ ('telegram_chat_id', models.BigIntegerField()),
+ ('telegram_nick_name', models.CharField(default=None, max_length=100, null=True)),
+ ('datetime', models.DateTimeField(auto_now_add=True)),
+ ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='telegram_connection', to='user_management.user')),
+ ],
+ options={
+ 'unique_together': {('user', 'telegram_chat_id')},
+ },
+ ),
+ ]
diff --git a/engine/apps/telegram/migrations/__init__.py b/engine/apps/telegram/migrations/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/engine/apps/telegram/models/__init__.py b/engine/apps/telegram/models/__init__.py
new file mode 100644
index 0000000000..434a9eced0
--- /dev/null
+++ b/engine/apps/telegram/models/__init__.py
@@ -0,0 +1,5 @@
+from .message import TelegramMessage # noqa: F401, isort: skip
+from .connectors.channel import TelegramToOrganizationConnector # noqa: F401
+from .connectors.personal import TelegramToUserConnector # noqa: F401
+from .verification.channel import TelegramChannelVerificationCode # noqa: F401
+from .verification.personal import TelegramVerificationCode # noqa: F401
diff --git a/engine/apps/telegram/models/connectors/__init__.py b/engine/apps/telegram/models/connectors/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/engine/apps/telegram/models/connectors/channel.py b/engine/apps/telegram/models/connectors/channel.py
new file mode 100644
index 0000000000..a24508a67b
--- /dev/null
+++ b/engine/apps/telegram/models/connectors/channel.py
@@ -0,0 +1,133 @@
+import logging
+from typing import Optional
+
+from django.conf import settings
+from django.core.validators import MinLengthValidator
+from django.db import models
+from django.db.models import Q
+from telegram import error
+
+from apps.alerts.models import AlertGroup
+from apps.telegram.client import TelegramClient
+from apps.telegram.models import TelegramMessage
+from apps.user_management.organization_log_creator import OrganizationLogType, create_organization_log
+from common.public_primary_keys import generate_public_primary_key, increase_public_primary_key_length
+
+logger = logging.getLogger(__name__)
+
+
+def generate_public_primary_key_for_telegram_to_at_connector() -> str:
+ prefix = "Z"
+ new_public_primary_key = generate_public_primary_key(prefix)
+
+ failure_counter = 0
+ while TelegramToOrganizationConnector.objects.filter(public_primary_key=new_public_primary_key).exists():
+ new_public_primary_key = increase_public_primary_key_length(
+ failure_counter=failure_counter, prefix=prefix, model_name="TelegramToOrganizationConnector"
+ )
+ failure_counter += 1
+
+ return new_public_primary_key
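+
+ # Illustration (value is made up): generated keys start with the "Z" prefix,
+ # e.g. "ZABCDEF123456", and are lengthened via increase_public_primary_key_length
+ # on collision until unique.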
+
+
+class TelegramToOrganizationConnector(models.Model):
+ public_primary_key = models.CharField(
+ max_length=20,
+ validators=[MinLengthValidator(settings.PUBLIC_PRIMARY_KEY_MIN_LENGTH + 1)],
+ unique=True,
+ default=generate_public_primary_key_for_telegram_to_at_connector,
+ )
+ organization = models.ForeignKey(
+ "user_management.Organization",
+ on_delete=models.CASCADE,
+ related_name="telegram_channel",
+ )
+
+ is_default_channel = models.BooleanField(null=True, default=False)
+
+ channel_chat_id = models.CharField(unique=True, max_length=100)
+ channel_name = models.CharField(max_length=100, null=True, default=None)
+
+ discussion_group_chat_id = models.CharField(unique=True, max_length=100)
+ discussion_group_name = models.CharField(max_length=100, null=True, default=None)
+
+ datetime = models.DateTimeField(auto_now_add=True)
+
+ NUM_GROUPED_ALERTS_IN_COMMENTS = 10
+
+ @property
+ def is_configured(self) -> bool:
+ return self.channel_chat_id is not None and self.discussion_group_chat_id is not None
+
+ @classmethod
+ def get_channel_for_alert_group(cls, alert_group: AlertGroup) -> Optional["TelegramToOrganizationConnector"]:
+ # TODO: add custom queryset
+ dm_messages_exist = alert_group.telegram_messages.filter(
+ ~Q(chat_id__startswith="-")
+ & Q(
+ message_type__in=(
+ TelegramMessage.PERSONAL_MESSAGE,
+ TelegramMessage.FORMATTING_ERROR,
+ )
+ ),
+ ).exists()
+
+ if dm_messages_exist:
+ return None
+
+ default_channel = cls.objects.filter(
+ organization=alert_group.channel.organization, is_default_channel=True
+ ).first()
+
+ if alert_group.channel_filter is None:
+ return default_channel
+
+ return alert_group.channel_filter.telegram_channel or default_channel
+
+ def make_channel_default(self, author):
+ try:
+ old_default_channel = TelegramToOrganizationConnector.objects.get(
+ organization=self.organization, is_default_channel=True
+ )
+ old_default_channel.is_default_channel = False
+ old_default_channel.save(update_fields=["is_default_channel"])
+ except TelegramToOrganizationConnector.DoesNotExist:
+ old_default_channel = None
+
+ self.is_default_channel = True
+ self.save(update_fields=["is_default_channel"])
+
+ description = (
+ f"The default channel for incidents in Telegram was changed "
+ f"{f'from @{old_default_channel.channel_name} ' if old_default_channel else ''}"
+ f"to @{self.channel_name}"
+ )
+ create_organization_log(
+ self.organization,
+ author,
+ OrganizationLogType.TYPE_TELEGRAM_DEFAULT_CHANNEL_CHANGED,
+ description,
+ )
+
+ def send_alert_group_message(self, alert_group: AlertGroup) -> None:
+ telegram_client = TelegramClient()
+
+ try:
+ telegram_client.send_message(
+ chat_id=self.channel_chat_id, message_type=TelegramMessage.ALERT_GROUP_MESSAGE, alert_group=alert_group
+ )
+ except error.BadRequest as e:
+ if e.message == "Need administrator rights in the channel chat":
+ logger.warning(
+ f"Could not send alert group to Telegram channel with id {self.channel_chat_id} "
+ f"due to lack of admin rights. alert_group {alert_group.pk}"
+ )
+ elif e.message == "Chat not found":
+ logger.warning(
+ f"Could not send alert group to Telegram channel with id {self.channel_chat_id} "
+ f"due to 'Chat not found'. alert_group {alert_group.pk}"
+ )
+ else:
+ telegram_client.send_message(
+ chat_id=self.channel_chat_id, message_type=TelegramMessage.FORMATTING_ERROR, alert_group=alert_group
+ )
diff --git a/engine/apps/telegram/models/connectors/personal.py b/engine/apps/telegram/models/connectors/personal.py
new file mode 100644
index 0000000000..4b0533d808
--- /dev/null
+++ b/engine/apps/telegram/models/connectors/personal.py
@@ -0,0 +1,166 @@
+from django.core.exceptions import ImproperlyConfigured
+from django.db import models
+from telegram import error
+
+from apps.alerts.models import AlertGroup
+from apps.base.models import UserNotificationPolicy, UserNotificationPolicyLogRecord
+from apps.telegram.client import TelegramClient
+from apps.telegram.models import TelegramMessage, TelegramToOrganizationConnector
+from apps.telegram.tasks import send_link_to_channel_message_or_fallback_to_full_incident
+from apps.user_management.models import User
+
+ONE_MORE_NOTIFICATION = "One more notification about this ☝"
+ALERT_CANT_BE_RENDERED = "You have a new incident, but Telegram can't render its content! Please check it out: {link}"
+
+
+class TelegramToUserConnector(models.Model):
+ user = models.OneToOneField("user_management.User", on_delete=models.CASCADE, related_name="telegram_connection")
+
+ telegram_chat_id = models.BigIntegerField()
+ telegram_nick_name = models.CharField(max_length=100, null=True, default=None)
+ datetime = models.DateTimeField(auto_now_add=True)
+
+ class Meta:
+ unique_together = (("user", "telegram_chat_id"),)
+
+ @classmethod
+ def notify_user(cls, user: User, alert_group: AlertGroup, notification_policy: UserNotificationPolicy) -> None:
+ try:
+ user_connector = user.telegram_connection
+ user_connector.notify(alert_group=alert_group, notification_policy=notification_policy)
+ except TelegramToUserConnector.DoesNotExist:
+ cls.create_telegram_notification_error(
+ alert_group=alert_group,
+ user=user,
+ notification_policy=notification_policy,
+ error_code=UserNotificationPolicyLogRecord.ERROR_NOTIFICATION_TELEGRAM_IS_NOT_LINKED_TO_SLACK_ACC,
+ )
+
+ def notify(self, alert_group: AlertGroup, notification_policy: UserNotificationPolicy) -> None:
+ telegram_channel = TelegramToOrganizationConnector.get_channel_for_alert_group(alert_group)
+
+ if telegram_channel is not None:
+ send_link_to_channel_message_or_fallback_to_full_incident.delay(
+ alert_group_pk=alert_group.pk,
+ notification_policy_pk=notification_policy.pk,
+ user_connector_pk=self.pk,
+ )
+ else:
+ self.send_full_incident(alert_group=alert_group, notification_policy=notification_policy)
+
+ @staticmethod
+ def create_telegram_notification_error(
+ alert_group: AlertGroup, user: User, notification_policy: UserNotificationPolicy, error_code: int
+ ) -> None:
+ log_record = UserNotificationPolicyLogRecord(
+ author=user,
+ type=UserNotificationPolicyLogRecord.TYPE_PERSONAL_NOTIFICATION_FAILED,
+ notification_policy=notification_policy,
+ alert_group=alert_group,
+ notification_error_code=error_code,
+ notification_step=notification_policy.step if notification_policy else None,
+ notification_channel=notification_policy.notify_by if notification_policy else None,
+ )
+ log_record.save()
+
+ # send the actual incident and incident log to user's DM
+ def send_full_incident(self, alert_group: AlertGroup, notification_policy: UserNotificationPolicy) -> None:
+ try:
+ telegram_client = TelegramClient()
+ except (ImproperlyConfigured, error.InvalidToken):
+ TelegramToUserConnector.create_telegram_notification_error(
+ alert_group,
+ self.user,
+ notification_policy,
+ UserNotificationPolicyLogRecord.ERROR_NOTIFICATION_TELEGRAM_TOKEN_ERROR,
+ )
+ return
+
+ old_alert_group_message = alert_group.telegram_messages.filter(
+ chat_id=self.telegram_chat_id,
+ message_type__in=[
+ TelegramMessage.PERSONAL_MESSAGE,
+ TelegramMessage.FORMATTING_ERROR,
+ ],
+ ).first()
+
+ if old_alert_group_message is None:
+ try:
+ telegram_client.send_message(
+ chat_id=self.telegram_chat_id,
+ message_type=TelegramMessage.PERSONAL_MESSAGE,
+ alert_group=alert_group,
+ )
+ except error.BadRequest:
+ telegram_client.send_message(
+ chat_id=self.telegram_chat_id,
+ message_type=TelegramMessage.FORMATTING_ERROR,
+ alert_group=alert_group,
+ )
+ except error.Unauthorized as e:
+ if e.message == "Forbidden: bot was blocked by the user":
+ TelegramToUserConnector.create_telegram_notification_error(
+ alert_group,
+ self.user,
+ notification_policy,
+ UserNotificationPolicyLogRecord.ERROR_NOTIFICATION_TELEGRAM_BOT_IS_DELETED,
+ )
+ elif e.message == "Invalid token":
+ TelegramToUserConnector.create_telegram_notification_error(
+ alert_group,
+ self.user,
+ notification_policy,
+ UserNotificationPolicyLogRecord.ERROR_NOTIFICATION_TELEGRAM_TOKEN_ERROR,
+ )
+ else:
+ raise e
+ else:
+ telegram_client.send_raw_message(
+ chat_id=old_alert_group_message.chat_id,
+ text=ONE_MORE_NOTIFICATION,
+ reply_to_message_id=old_alert_group_message.message_id,
+ )
+
+ # send DM message with the link to the alert group post in channel
+ def send_link_to_channel_message(self, alert_group: AlertGroup, notification_policy: UserNotificationPolicy):
+ try:
+ telegram_client = TelegramClient()
+ except (ImproperlyConfigured, error.InvalidToken):
+ TelegramToUserConnector.create_telegram_notification_error(
+ alert_group,
+ self.user,
+ notification_policy,
+ UserNotificationPolicyLogRecord.ERROR_NOTIFICATION_TELEGRAM_TOKEN_ERROR,
+ )
+ return
+
+ try:
+ telegram_client.send_message(
+ chat_id=self.telegram_chat_id,
+ message_type=TelegramMessage.LINK_TO_CHANNEL_MESSAGE,
+ alert_group=alert_group,
+ )
+ except error.BadRequest:
+ # incorrect format of the title, so do not include it in the link-to-channel message
+ telegram_client.send_message(
+ chat_id=self.telegram_chat_id,
+ message_type=TelegramMessage.LINK_TO_CHANNEL_MESSAGE_WITHOUT_TITLE,
+ alert_group=alert_group,
+ )
+ except error.Unauthorized as e:
+ if e.message == "Forbidden: bot was blocked by the user":
+ TelegramToUserConnector.create_telegram_notification_error(
+ alert_group,
+ self.user,
+ notification_policy,
+ UserNotificationPolicyLogRecord.ERROR_NOTIFICATION_TELEGRAM_BOT_IS_DELETED,
+ )
+ elif e.message == "Invalid token":
+ TelegramToUserConnector.create_telegram_notification_error(
+ alert_group,
+ self.user,
+ notification_policy,
+ UserNotificationPolicyLogRecord.ERROR_NOTIFICATION_TELEGRAM_TOKEN_ERROR,
+ )
+ else:
+ raise e
diff --git a/engine/apps/telegram/models/message.py b/engine/apps/telegram/models/message.py
new file mode 100644
index 0000000000..2d9af5ff42
--- /dev/null
+++ b/engine/apps/telegram/models/message.py
@@ -0,0 +1,58 @@
+import telegram
+from django.db import models
+
+from apps.alerts.models import AlertGroup
+
+
+class TelegramMessage(models.Model):
+ (
+ ALERT_GROUP_MESSAGE,
+ ACTIONS_MESSAGE,
+ LOG_MESSAGE,
+ FORMATTING_ERROR,
+ PERSONAL_MESSAGE,
+ LINK_TO_CHANNEL_MESSAGE,
+ LINK_TO_CHANNEL_MESSAGE_WITHOUT_TITLE,
+ ) = range(7)
+
+ TELEGRAM_MESSAGE_CHOICES = (
+ (ALERT_GROUP_MESSAGE, "Alert group message"),
+ (ACTIONS_MESSAGE, "Actions message"),
+ (LOG_MESSAGE, "Log message"),
+ (FORMATTING_ERROR, "Alert can not be rendered"),
+ (PERSONAL_MESSAGE, "Alert group message with action buttons and incident log"),
+ (LINK_TO_CHANNEL_MESSAGE, "Link to channel message"),
+ (LINK_TO_CHANNEL_MESSAGE_WITHOUT_TITLE, "Link to channel message without title"),
+ )
+
+ message_id = models.IntegerField()
+ chat_id = models.CharField(max_length=100)
+
+ message_type = models.IntegerField(choices=TELEGRAM_MESSAGE_CHOICES)
+
+ discussion_group_message_id = models.IntegerField(null=True, default=None)
+
+ alert_group = models.ForeignKey(
+ "alerts.AlertGroup",
+ on_delete=models.CASCADE,
+ related_name="telegram_messages",
+ )
+
+ # field for task debouncing for apps.telegram.tasks.edit_message
+ edit_task_id = models.CharField(max_length=100, null=True, default=None)
+
+ @property
+ def link(self) -> str:
+ chat_slug = self.chat_id[-10:]
+ return f"https://t.me/c/{chat_slug}/{self.message_id}?thread={self.message_id}"
+
+ @staticmethod
+ def create_from_message(
+ message: telegram.Message,
+ message_type: int,
+ alert_group: AlertGroup,
+ ) -> "TelegramMessage":
+ message = TelegramMessage.objects.create(
+ message_id=message.message_id, chat_id=message.chat.id, message_type=message_type, alert_group=alert_group
+ )
+ return message
diff --git a/engine/apps/telegram/models/verification/__init__.py b/engine/apps/telegram/models/verification/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/engine/apps/telegram/models/verification/channel.py b/engine/apps/telegram/models/verification/channel.py
new file mode 100644
index 0000000000..e3a7ce1a60
--- /dev/null
+++ b/engine/apps/telegram/models/verification/channel.py
@@ -0,0 +1,73 @@
+from typing import Optional, Tuple
+from uuid import uuid4
+
+from django.core.exceptions import ValidationError
+from django.db import models
+from django.utils import timezone
+
+from apps.telegram.models import TelegramToOrganizationConnector
+from apps.user_management.organization_log_creator import OrganizationLogType, create_organization_log
+
+
+class TelegramChannelVerificationCode(models.Model):
+ uuid = models.UUIDField(primary_key=True, default=uuid4, editable=False)
+ datetime = models.DateTimeField(auto_now_add=True)
+
+ organization = models.OneToOneField(
+ "user_management.Organization", on_delete=models.CASCADE, related_name="telegram_verification_code"
+ )
+
+ author = models.ForeignKey("user_management.User", on_delete=models.CASCADE, null=True, default=None)
+
+ @property
+ def is_active(self) -> bool:
+ return self.datetime + timezone.timedelta(days=1) > timezone.now()
+
+ @classmethod
+ def verify_channel_and_discussion_group(
+ cls,
+ uuid_code: str,
+ channel_chat_id: int,
+ channel_name: str,
+ discussion_group_chat_id: int,
+ discussion_group_name: str,
+ ) -> Tuple[Optional[TelegramToOrganizationConnector], bool]:
+ try:
+ verification_code = cls.objects.get(uuid=uuid_code)
+
+ # check whether the organization already has other channels connected;
+ # if this is the first channel, make it the default for the organization
+ connector_exists = verification_code.organization.telegram_channel.exists()
+
+ connector, created = TelegramToOrganizationConnector.objects.get_or_create(
+ channel_chat_id=channel_chat_id,
+ defaults={
+ "organization": verification_code.organization,
+ "channel_name": channel_name,
+ "discussion_group_chat_id": discussion_group_chat_id,
+ "discussion_group_name": discussion_group_name,
+ "is_default_channel": not connector_exists,
+ },
+ )
+
+ description = f"Telegram channel @{channel_name} was connected to organization"
+ create_organization_log(
+ verification_code.organization,
+ verification_code.author,
+ OrganizationLogType.TYPE_TELEGRAM_CHANNEL_CONNECTED,
+ description,
+ )
+
+ if not connector_exists:
+ description = f"The default channel for incidents in Telegram was changed to @{channel_name}"
+ create_organization_log(
+ verification_code.organization,
+ verification_code.author,
+ OrganizationLogType.TYPE_TELEGRAM_DEFAULT_CHANNEL_CHANGED,
+ description,
+ )
+
+ return connector, created
+
+ except (ValidationError, cls.DoesNotExist):
+ return None, False
diff --git a/engine/apps/telegram/models/verification/personal.py b/engine/apps/telegram/models/verification/personal.py
new file mode 100644
index 0000000000..1a689bbd3a
--- /dev/null
+++ b/engine/apps/telegram/models/verification/personal.py
@@ -0,0 +1,46 @@
+from typing import Optional, Tuple
+from uuid import uuid4
+
+from django.core.exceptions import ValidationError
+from django.db import models
+from django.utils import timezone
+
+from apps.telegram.models import TelegramToUserConnector
+from apps.user_management.organization_log_creator import OrganizationLogType, create_organization_log
+
+
+class TelegramVerificationCode(models.Model):
+ uuid = models.UUIDField(primary_key=True, default=uuid4, editable=False)
+ datetime = models.DateTimeField(auto_now_add=True)
+
+ user = models.OneToOneField(
+ "user_management.User", on_delete=models.CASCADE, related_name="telegram_verification_code"
+ )
+
+ @property
+ def is_active(self) -> bool:
+        return self.datetime + timezone.timedelta(days=1) > timezone.now()
+
+ @classmethod
+ def verify_user(
+ cls, uuid_code: str, telegram_chat_id: int, telegram_nick_name: str
+ ) -> Tuple[Optional[TelegramToUserConnector], bool]:
+ try:
+ verification_code = cls.objects.get(uuid=uuid_code)
+ user = verification_code.user
+
+ connector, created = TelegramToUserConnector.objects.get_or_create(
+ user=user, telegram_chat_id=telegram_chat_id, defaults={"telegram_nick_name": telegram_nick_name}
+ )
+
+ description = f"Telegram account of user {user.username} was connected"
+ create_organization_log(
+ user.organization,
+ user,
+ OrganizationLogType.TYPE_TELEGRAM_TO_USER_CONNECTED,
+ description,
+ )
+ return connector, created
+
+ except (ValidationError, cls.DoesNotExist):
+ return None, False
diff --git a/engine/apps/telegram/renderers/__init__.py b/engine/apps/telegram/renderers/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/engine/apps/telegram/renderers/keyboard.py b/engine/apps/telegram/renderers/keyboard.py
new file mode 100644
index 0000000000..ed13cd9f6a
--- /dev/null
+++ b/engine/apps/telegram/renderers/keyboard.py
@@ -0,0 +1,89 @@
+from enum import Enum
+from typing import List, Optional, Union
+
+from telegram import InlineKeyboardButton, InlineKeyboardMarkup
+
+from apps.alerts.models import AlertGroup
+from apps.telegram.utils import CallbackQueryFactory
+
+
+class Action(Enum):
+ ACKNOWLEDGE = "acknowledge"
+ UNACKNOWLEDGE = "unacknowledge"
+ RESOLVE = "resolve"
+ UNRESOLVE = "unresolve"
+ SILENCE = "silence"
+ UNSILENCE = "unsilence"
+
+
+class TelegramKeyboardRenderer:
+ def __init__(self, alert_group: AlertGroup):
+ self.alert_group = alert_group
+
+ # Inline keyboard with controls for alert group message
+ def render_actions_keyboard(self) -> Optional[InlineKeyboardMarkup]:
+ if self.alert_group.root_alert_group is not None:
+ # No keyboard for attached incident
+ return None
+
+ rows = []
+
+ # Acknowledge/Unacknowledge button
+ if not self.alert_group.resolved:
+ rows.append([self.acknowledge_button])
+
+        # Resolve/Unresolve button
+ rows.append([self.resolve_button])
+
+ # Silence/Unsilence buttons
+ if not self.alert_group.acknowledged and not self.alert_group.resolved:
+ if not self.alert_group.silenced:
+ rows.append(self.silence_buttons)
+ else:
+ rows.append([self.unsilence_button])
+
+ return InlineKeyboardMarkup(rows)
+
+ @staticmethod
+ def render_link_to_channel_keyboard(link: str) -> InlineKeyboardMarkup:
+ button = InlineKeyboardButton(text="Go to the incident", url=link)
+ return InlineKeyboardMarkup([[button]])
+
+ @property
+ def acknowledge_button(self) -> InlineKeyboardButton:
+ action = Action.ACKNOWLEDGE if not self.alert_group.acknowledged else Action.UNACKNOWLEDGE
+ return self._render_button(text=action.value.capitalize(), action=action)
+
+ @property
+ def resolve_button(self) -> InlineKeyboardButton:
+ action = Action.RESOLVE if not self.alert_group.resolved else Action.UNRESOLVE
+ return self._render_button(text=action.value.capitalize(), action=action)
+
+ @property
+ def silence_buttons(self) -> List[InlineKeyboardButton]:
+ silence_forever_button = self._render_button(text="🔕 forever", action=Action.SILENCE)
+
+ silence_delay_one_hour = 3600 # one hour
+ silence_one_hour_button = self._render_button(
+ text="... for 1h", action=Action.SILENCE, action_data=silence_delay_one_hour
+ )
+
+ silence_delay_four_hours = 14400 # four hours
+ silence_four_hours_button = self._render_button(
+ text="... for 4h", action=Action.SILENCE, action_data=silence_delay_four_hours
+ )
+
+ return [silence_forever_button, silence_one_hour_button, silence_four_hours_button]
+
+ @property
+ def unsilence_button(self) -> InlineKeyboardButton:
+ return self._render_button(text=Action.UNSILENCE.value.capitalize(), action=Action.UNSILENCE)
+
+ def _render_button(self, text: str, action: Action, action_data: Optional[Union[int, str]] = None):
+ callback_data_args = [self.alert_group.pk, action.value]
+ if action_data is not None:
+ callback_data_args.append(action_data)
+
+ button = InlineKeyboardButton(text=text, callback_data=CallbackQueryFactory.encode_data(*callback_data_args))
+
+ return button
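The callback_data attached to each button is a colon-separated string produced by CallbackQueryFactory.encode_data (defined in engine/apps/telegram/utils.py below). A minimal sketch with a hypothetical alert group pk of 42:

    SEPARATOR = ":"

    def encode_data(*args) -> str:
        # mirrors CallbackQueryFactory.encode_data
        return SEPARATOR.join(map(str, args))

    assert encode_data(42, "acknowledge") == "42:acknowledge"     # acknowledge button
    assert encode_data(42, "silence", 3600) == "42:silence:3600"  # "... for 1h" silence button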
diff --git a/engine/apps/telegram/renderers/message.py b/engine/apps/telegram/renderers/message.py
new file mode 100644
index 0000000000..1e989fe48d
--- /dev/null
+++ b/engine/apps/telegram/renderers/message.py
@@ -0,0 +1,99 @@
+from apps.alerts.incident_appearance.renderers.telegram_renderer import (
+ AlertGroupTelegramRenderer,
+ AlertTelegramRenderer,
+)
+from apps.alerts.incident_log_builder import IncidentLogBuilder
+from apps.alerts.models import AlertGroup, AlertGroupLogRecord
+from apps.base.models import UserNotificationPolicyLogRecord
+from apps.slack.slack_formatter import SlackFormatter
+from common.utils import is_string_with_visible_characters
+
+MAX_TELEGRAM_MESSAGE_LENGTH = 4096
+MESSAGE_TRIMMED_TEXT = "\n\nMessage is trimmed! See full incident here: {link}"
+
+
+class TelegramMessageRenderer:
+ def __init__(self, alert_group: AlertGroup):
+ self.alert_group = alert_group
+
+ def render_alert_group_message(self) -> str:
+ text = AlertGroupTelegramRenderer(self.alert_group).render()
+
+ if len(text) > MAX_TELEGRAM_MESSAGE_LENGTH:
+ text = self._trim_text(text)
+
+ return text
+
+ def render_log_message(self, max_message_length: int = MAX_TELEGRAM_MESSAGE_LENGTH) -> str:
+ start_line_text = "Incident log:\n"
+
+ slack_formatter = SlackFormatter(self.alert_group.channel.organization)
+ log_builder = IncidentLogBuilder(alert_group=self.alert_group)
+ log_records = log_builder.get_log_records_list()
+
+ log_lines = []
+ for log_record in log_records:
+ if isinstance(log_record, AlertGroupLogRecord):
+ log_line = log_record.rendered_incident_log_line(html=True)
+
+ # dirty hack to deal with attach / unattach logs
+ log_line = slack_formatter.render_text(log_line, process_markdown=True)
+                log_line = log_line.replace("<p>", "").replace("</p>", "")  # strip tags added by markdown rendering
+
+ log_lines.append(log_line)
+ elif isinstance(log_record, UserNotificationPolicyLogRecord):
+ log_line = log_record.rendered_notification_log_line(html=True)
+ log_lines.append(log_line)
+
+ message_trimmed_text = MESSAGE_TRIMMED_TEXT.format(link=self.alert_group.web_link)
+ max_log_lines_length = max_message_length - len(start_line_text) - len(message_trimmed_text)
+ is_message_trimmed = len("\n".join(log_lines)) > max_log_lines_length
+ while len("\n".join(log_lines)) > max_log_lines_length:
+ log_lines.pop()
+
+ log_lines_text = "\n".join(log_lines)
+ if is_message_trimmed:
+ log_lines_text += message_trimmed_text
+
+ text = start_line_text + log_lines_text
+ return text
+
+ def render_actions_message(self) -> str:
+ if self.alert_group.root_alert_group is None:
+ text = "Actions available for this incident"
+ else:
+ # No actions for attached incidents
+ text = "No actions are available for this incident"
+
+ return text
+
+    def render_personal_message(self) -> str:
+ text = AlertGroupTelegramRenderer(self.alert_group).render()
+
+ if len(text) > MAX_TELEGRAM_MESSAGE_LENGTH:
+ return self._trim_text(text)
+
+        text += "\n" * 3 + self.render_log_message(max_message_length=MAX_TELEGRAM_MESSAGE_LENGTH - len(text) - 3)
+ return text
+
+ def render_link_to_channel_message(self, include_title: bool = True) -> str:
+ text = "👀 You are invited to look at the incident!"
+
+ if include_title:
+ first_alert_in_group = self.alert_group.alerts.first()
+ templated_alert = AlertTelegramRenderer(first_alert_in_group).templated_alert
+ if is_string_with_visible_characters(templated_alert.title):
+ text += f"\n#{self.alert_group.inside_organization_number}, {templated_alert.title} "
+
+ return text
+
+ def render_formatting_error_message(self) -> str:
+ return (
+ "You have a new incident, but Telegram can't render its content! "
+ f"Please check it out: {self.alert_group.web_link}"
+ )
+
+ def _trim_text(self, text: str) -> str:
+ trim_fallback_text = MESSAGE_TRIMMED_TEXT.format(link=self.alert_group.web_link)
+ text = text[: MAX_TELEGRAM_MESSAGE_LENGTH - len(trim_fallback_text)] + trim_fallback_text
+ return text
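_trim_text keeps messages under the Telegram limit by reserving room for the fallback suffix before slicing. A standalone sketch of the same rule, with a hypothetical link:

    MAX_TELEGRAM_MESSAGE_LENGTH = 4096
    MESSAGE_TRIMMED_TEXT = "\n\nMessage is trimmed! See full incident here: {link}"

    def trim(text: str, link: str) -> str:
        # mirrors TelegramMessageRenderer._trim_text
        suffix = MESSAGE_TRIMMED_TEXT.format(link=link)
        return text[: MAX_TELEGRAM_MESSAGE_LENGTH - len(suffix)] + suffix

    trimmed = trim("x" * 10_000, "https://example.com")
    assert len(trimmed) == MAX_TELEGRAM_MESSAGE_LENGTH
    assert trimmed.endswith("https://example.com")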
diff --git a/engine/apps/telegram/signals.py b/engine/apps/telegram/signals.py
new file mode 100644
index 0000000000..81d5287626
--- /dev/null
+++ b/engine/apps/telegram/signals.py
@@ -0,0 +1,10 @@
+from apps.alerts.signals import (
+ alert_create_signal,
+ alert_group_action_triggered_signal,
+ alert_group_update_log_report_signal,
+)
+from apps.telegram.alert_group_representative import AlertGroupTelegramRepresentative
+
+alert_create_signal.connect(AlertGroupTelegramRepresentative.on_create_alert)
+alert_group_action_triggered_signal.connect(AlertGroupTelegramRepresentative.on_alert_group_action_triggered)
+alert_group_update_log_report_signal.connect(AlertGroupTelegramRepresentative.on_alert_group_update_log_report)
diff --git a/engine/apps/telegram/tasks.py b/engine/apps/telegram/tasks.py
new file mode 100644
index 0000000000..fb1b2f969b
--- /dev/null
+++ b/engine/apps/telegram/tasks.py
@@ -0,0 +1,191 @@
+import logging
+
+from celery import uuid as celery_uuid
+from celery.utils.log import get_task_logger
+from django.apps import apps
+from django.conf import settings
+from telegram import error
+
+from apps.alerts.models import Alert, AlertGroup
+from apps.base.models import UserNotificationPolicy
+from apps.telegram.client import TelegramClient
+from apps.telegram.decorators import (
+ handle_missing_token,
+ ignore_bot_deleted,
+ ignore_message_to_edit_deleted,
+ ignore_message_unchanged,
+ ignore_reply_to_message_deleted,
+)
+from apps.telegram.models import TelegramMessage, TelegramToOrganizationConnector
+from common.custom_celery_tasks import shared_dedicated_queue_retry_task
+from common.utils import OkToRetry
+
+logger = get_task_logger(__name__)
+logger.setLevel(logging.DEBUG)
+
+
+@shared_dedicated_queue_retry_task(
+ autoretry_for=(Exception,), retry_backoff=True, max_retries=1 if settings.DEBUG else None
+)
+@handle_missing_token
+def register_telegram_webhook(token=None):
+ telegram_client = TelegramClient(token=token)
+
+ try:
+ telegram_client.register_webhook()
+ except (error.InvalidToken, error.Unauthorized) as e:
+ logger.warning(f"Tried to register Telegram webhook using token: {telegram_client.token}, got error: {e}")
+
+
+@shared_dedicated_queue_retry_task(
+ bind=True, autoretry_for=(Exception,), retry_backoff=True, max_retries=1 if settings.DEBUG else None
+)
+@ignore_message_unchanged
+@ignore_message_to_edit_deleted
+@ignore_bot_deleted
+def edit_message(self, message_pk):
+ message = TelegramMessage.objects.get(pk=message_pk)
+ telegram_client = TelegramClient()
+
+    # if edit_task_id was not set at the time the task was invoked, assign it and rerun the task
+ if message.edit_task_id is None:
+ task_id = celery_uuid()
+ message.edit_task_id = task_id
+ message.save(update_fields=["edit_task_id"])
+
+ edit_message.apply_async((message_pk,), task_id=task_id)
+ return
+
+ if message.edit_task_id != edit_message.request.id:
+ logger.debug("Dropping the task since another task was scheduled already.")
+ return
+
+ try:
+ telegram_client.edit_message(message=message)
+    except error.BadRequest as e:
+        # "Message is not modified" just means there is nothing to update, so it is safe to ignore
+        if "Message is not modified" not in e.message:
+            raise
+ except (error.RetryAfter, error.TimedOut) as e:
+ countdown = getattr(e, "retry_after", 3)
+
+ task_id = celery_uuid()
+ message.edit_task_id = task_id
+ message.save(update_fields=["edit_task_id"])
+
+ edit_message.apply_async((message_pk,), countdown=countdown, task_id=task_id)
+ return
+
+ message.edit_task_id = None
+ message.save(update_fields=["edit_task_id"])
+
+
+@shared_dedicated_queue_retry_task(bind=True, autoretry_for=(Exception,), retry_backoff=True, max_retries=None)
+def send_link_to_channel_message_or_fallback_to_full_incident(
+ self, alert_group_pk, notification_policy_pk, user_connector_pk
+):
+ TelegramToUserConnector = apps.get_model("telegram", "TelegramToUserConnector")
+
+ try:
+ user_connector = TelegramToUserConnector.objects.get(pk=user_connector_pk)
+ alert_group = AlertGroup.all_objects.get(pk=alert_group_pk)
+ notification_policy = UserNotificationPolicy.objects.get(pk=notification_policy_pk)
+
+        # the Telegram message probably just hasn't appeared in the Telegram channel yet
+ if self.request.retries <= 10:
+ user_connector.send_link_to_channel_message(
+ alert_group=alert_group, notification_policy=notification_policy
+ )
+ else:
+            # it seems the message won't appear in the Telegram channel, so send the full incident to the user
+ user_connector.send_full_incident(alert_group=alert_group, notification_policy=notification_policy)
+ except TelegramToUserConnector.DoesNotExist:
+ # Handle cases when user deleted the bot while escalation is active
+ logger.warning(
+ f"TelegramToUserConnector {user_connector_pk} not found. "
+            f"Most probably it was deleted while the escalation was in progress. "
+            f"Alert group: {alert_group_pk}"
+ )
+
+
+@shared_dedicated_queue_retry_task(
+ bind=True, autoretry_for=(Exception,), retry_backoff=True, max_retries=1 if settings.DEBUG else None
+)
+@handle_missing_token
+@ignore_reply_to_message_deleted
+@ignore_bot_deleted
+def send_log_and_actions_message(self, channel_chat_id, group_chat_id, channel_message_id, reply_to_message_id):
+ with OkToRetry(task=self, exc=TelegramMessage.DoesNotExist, num_retries=5):
+ channel_message = TelegramMessage.objects.get(chat_id=channel_chat_id, message_id=channel_message_id)
+
+ if channel_message.discussion_group_message_id is None:
+ channel_message.discussion_group_message_id = reply_to_message_id
+ channel_message.save(update_fields=["discussion_group_message_id"])
+
+ alert_group = channel_message.alert_group
+
+ log_message_sent = alert_group.telegram_messages.filter(message_type=TelegramMessage.LOG_MESSAGE).exists()
+ actions_message_sent = alert_group.telegram_messages.filter(
+ message_type=TelegramMessage.ACTIONS_MESSAGE
+ ).exists()
+
+ telegram_client = TelegramClient()
+ with OkToRetry(
+ task=self, exc=(error.RetryAfter, error.TimedOut), compute_countdown=lambda e: getattr(e, "retry_after", 3)
+ ):
+ if not log_message_sent:
+ telegram_client.send_message(
+ chat_id=group_chat_id,
+ message_type=TelegramMessage.LOG_MESSAGE,
+ alert_group=alert_group,
+ reply_to_message_id=reply_to_message_id,
+ )
+ if not actions_message_sent:
+ telegram_client.send_message(
+ chat_id=group_chat_id,
+ message_type=TelegramMessage.ACTIONS_MESSAGE,
+ alert_group=alert_group,
+ reply_to_message_id=reply_to_message_id,
+ )
+
+
+@shared_dedicated_queue_retry_task(
+ bind=True, autoretry_for=(Exception,), retry_backoff=True, max_retries=1 if settings.DEBUG else None
+)
+@handle_missing_token
+@ignore_bot_deleted
+@ignore_reply_to_message_deleted
+def on_create_alert_telegram_representative_async(self, alert_pk):
+ """
+    This task is async so that Telegram downtime or formatting issues do not delay SMS and other notification destinations.
+ """
+
+ alert = Alert.objects.get(pk=alert_pk)
+ alert_group = alert.group
+
+ alert_group_messages = alert_group.telegram_messages.filter(
+ message_type__in=[
+ TelegramMessage.ALERT_GROUP_MESSAGE,
+ TelegramMessage.PERSONAL_MESSAGE,
+ TelegramMessage.FORMATTING_ERROR,
+ ]
+ )
+ # TODO: discuss moving this logic into .send_alert_group_message
+
+ telegram_channel = TelegramToOrganizationConnector.get_channel_for_alert_group(alert_group)
+
+ if telegram_channel is not None and not alert_group_messages.exists():
+ with OkToRetry(
+ task=self,
+ exc=(error.RetryAfter, error.TimedOut),
+ compute_countdown=lambda e: getattr(e, "retry_after", 3),
+ ):
+ telegram_channel.send_alert_group_message(alert_group)
+
+ messages_to_edit = alert_group_messages.filter(
+ message_type__in=(
+ TelegramMessage.ALERT_GROUP_MESSAGE,
+ TelegramMessage.PERSONAL_MESSAGE,
+ )
+ )
+ for message in messages_to_edit:
+ edit_message.delay(message_pk=message.pk)
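edit_message deduplicates concurrent edits through the edit_task_id column: a task id is generated before scheduling, stored on the message row, and any task whose id no longer matches the stored one drops out. A self-contained sketch of the pattern with the Celery specifics stripped away (hypothetical Message stand-in):

    import uuid

    class Message:
        edit_task_id = None

    def schedule_edit(message: Message) -> str:
        task_id = str(uuid.uuid4())     # celery_uuid() in the real task
        message.edit_task_id = task_id  # persisted via .save(update_fields=...) in the real task
        return task_id                  # passed as task_id= to apply_async

    def run_edit(message: Message, my_task_id: str) -> bool:
        if message.edit_task_id != my_task_id:
            return False                # a newer edit owns the message; drop this task
        message.edit_task_id = None     # edit done, clear the token
        return True

    m = Message()
    first = schedule_edit(m)
    second = schedule_edit(m)           # rescheduled before the first task ran
    assert run_edit(m, first) is False  # the stale task is dropped
    assert run_edit(m, second) is True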
diff --git a/engine/apps/telegram/tests/__init__.py b/engine/apps/telegram/tests/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/engine/apps/telegram/tests/factories.py b/engine/apps/telegram/tests/factories.py
new file mode 100644
index 0000000000..c9edb2c23f
--- /dev/null
+++ b/engine/apps/telegram/tests/factories.py
@@ -0,0 +1,45 @@
+import factory
+
+from apps.telegram.models import (
+ TelegramChannelVerificationCode,
+ TelegramMessage,
+ TelegramToOrganizationConnector,
+ TelegramToUserConnector,
+ TelegramVerificationCode,
+)
+from common.utils import UniqueFaker
+
+
+class TelegramToUserConnectorFactory(factory.DjangoModelFactory):
+ telegram_chat_id = UniqueFaker("pyint")
+
+ class Meta:
+ model = TelegramToUserConnector
+
+
+class TelegramChannelFactory(factory.DjangoModelFactory):
+ channel_chat_id = factory.LazyAttribute(lambda v: str(UniqueFaker("pyint").generate()))
+ channel_name = factory.Faker("word")
+ discussion_group_chat_id = factory.LazyAttribute(lambda v: str(UniqueFaker("pyint").generate()))
+ discussion_group_name = factory.Faker("word")
+
+ class Meta:
+ model = TelegramToOrganizationConnector
+
+
+class TelegramVerificationCodeFactory(factory.DjangoModelFactory):
+ class Meta:
+ model = TelegramVerificationCode
+
+
+class TelegramChannelVerificationCodeFactory(factory.DjangoModelFactory):
+ class Meta:
+ model = TelegramChannelVerificationCode
+
+
+class TelegramMessageFactory(factory.DjangoModelFactory):
+ message_id = factory.Faker("pyint")
+ chat_id = factory.Faker("word")
+
+ class Meta:
+ model = TelegramMessage
diff --git a/engine/apps/telegram/tests/test_keyboard_renderer.py b/engine/apps/telegram/tests/test_keyboard_renderer.py
new file mode 100644
index 0000000000..a3e614be9d
--- /dev/null
+++ b/engine/apps/telegram/tests/test_keyboard_renderer.py
@@ -0,0 +1,138 @@
+from typing import List
+
+import pytest
+from telegram import InlineKeyboardButton
+
+from apps.alerts.models import AlertReceiveChannel
+from apps.telegram.renderers.keyboard import TelegramKeyboardRenderer
+
+
+def are_buttons_equal(button: InlineKeyboardButton, other: InlineKeyboardButton) -> bool:
+ return button.text == other.text and button.callback_data == other.callback_data and button.url == other.url
+
+
+def are_keyboards_equal(keyboard: List[List[InlineKeyboardButton]], other: List[List[InlineKeyboardButton]]) -> bool:
+ if len(keyboard) != len(other):
+ return False
+
+ for i in range(len(keyboard)):
+ row = keyboard[i]
+ other_row = other[i]
+
+ if len(row) != len(other_row):
+ return False
+
+ for j in range(len(row)):
+ button = row[j]
+ other_button = other_row[j]
+
+ if not are_buttons_equal(button, other_button):
+ return False
+
+ return True
+
+
+@pytest.mark.django_db
+def test_actions_keyboard_alerting(make_organization, make_alert_receive_channel, make_alert_group, make_alert):
+ organization = make_organization()
+ alert_receive_channel = make_alert_receive_channel(
+ organization, integration=AlertReceiveChannel.INTEGRATION_GRAFANA, verbal_name="Test integration"
+ )
+
+ alert_group = make_alert_group(alert_receive_channel)
+ make_alert(alert_group=alert_group, raw_request_data=alert_receive_channel.config.example_payload)
+
+ renderer = TelegramKeyboardRenderer(alert_group=alert_group)
+ keyboard = renderer.render_actions_keyboard()
+
+ expected_keyboard = [
+ [InlineKeyboardButton(text="Acknowledge", callback_data=f"{alert_group.pk}:acknowledge")],
+ [InlineKeyboardButton(text="Resolve", callback_data=f"{alert_group.pk}:resolve")],
+ [
+ InlineKeyboardButton(text="🔕 forever", callback_data=f"{alert_group.pk}:silence"),
+ InlineKeyboardButton(text="... for 1h", callback_data=f"{alert_group.pk}:silence:3600"),
+ InlineKeyboardButton(text="... for 4h", callback_data=f"{alert_group.pk}:silence:14400"),
+ ],
+ ]
+
+ assert are_keyboards_equal(keyboard.inline_keyboard, expected_keyboard) is True
+
+
+@pytest.mark.django_db
+def test_actions_keyboard_acknowledged(
+ make_organization_and_user, make_alert_receive_channel, make_alert_group, make_alert
+):
+ organization, user = make_organization_and_user()
+ alert_receive_channel = make_alert_receive_channel(
+ organization, integration=AlertReceiveChannel.INTEGRATION_GRAFANA, verbal_name="Test integration"
+ )
+
+ alert_group = make_alert_group(alert_receive_channel)
+ make_alert(alert_group=alert_group, raw_request_data=alert_receive_channel.config.example_payload)
+ alert_group.acknowledge_by_user(user)
+
+ renderer = TelegramKeyboardRenderer(alert_group=alert_group)
+ keyboard = renderer.render_actions_keyboard()
+
+ expected_keyboard = [
+ [InlineKeyboardButton(text="Unacknowledge", callback_data=f"{alert_group.pk}:unacknowledge")],
+ [InlineKeyboardButton(text="Resolve", callback_data=f"{alert_group.pk}:resolve")],
+ ]
+
+ assert are_keyboards_equal(keyboard.inline_keyboard, expected_keyboard) is True
+
+
+@pytest.mark.django_db
+def test_actions_keyboard_resolved(
+ make_organization_and_user, make_alert_receive_channel, make_alert_group, make_alert
+):
+ organization, user = make_organization_and_user()
+ alert_receive_channel = make_alert_receive_channel(
+ organization, integration=AlertReceiveChannel.INTEGRATION_GRAFANA, verbal_name="Test integration"
+ )
+
+ alert_group = make_alert_group(alert_receive_channel)
+ make_alert(alert_group=alert_group, raw_request_data=alert_receive_channel.config.example_payload)
+ alert_group.resolve_by_user(user)
+
+ renderer = TelegramKeyboardRenderer(alert_group=alert_group)
+ keyboard = renderer.render_actions_keyboard()
+
+ expected_keyboard = [
+ [InlineKeyboardButton(text="Unresolve", callback_data=f"{alert_group.pk}:unresolve")],
+ ]
+
+ assert are_keyboards_equal(keyboard.inline_keyboard, expected_keyboard) is True
+
+
+@pytest.mark.django_db
+def test_actions_keyboard_silenced(
+ make_organization_and_user, make_alert_receive_channel, make_alert_group, make_alert
+):
+ organization, user = make_organization_and_user()
+ alert_receive_channel = make_alert_receive_channel(
+ organization, integration=AlertReceiveChannel.INTEGRATION_GRAFANA, verbal_name="Test integration"
+ )
+
+ alert_group = make_alert_group(alert_receive_channel)
+ make_alert(alert_group=alert_group, raw_request_data=alert_receive_channel.config.example_payload)
+ alert_group.silence_by_user(user, silence_delay=None)
+
+ renderer = TelegramKeyboardRenderer(alert_group=alert_group)
+ keyboard = renderer.render_actions_keyboard()
+
+ expected_keyboard = [
+ [InlineKeyboardButton(text="Acknowledge", callback_data=f"{alert_group.pk}:acknowledge")],
+ [InlineKeyboardButton(text="Resolve", callback_data=f"{alert_group.pk}:resolve")],
+ [InlineKeyboardButton(text="Unsilence", callback_data=f"{alert_group.pk}:unsilence")],
+ ]
+
+ assert are_keyboards_equal(keyboard.inline_keyboard, expected_keyboard) is True
+
+
+@pytest.mark.django_db
+def test_link_to_channel_keyboard():
+ keyboard = TelegramKeyboardRenderer.render_link_to_channel_keyboard(link="http://test.com")
+ expected_keyboard = [[InlineKeyboardButton(text="Go to the incident", url="http://test.com")]]
+
+ assert are_keyboards_equal(keyboard.inline_keyboard, expected_keyboard) is True
diff --git a/engine/apps/telegram/tests/test_message_renderer.py b/engine/apps/telegram/tests/test_message_renderer.py
new file mode 100644
index 0000000000..44c1248e56
--- /dev/null
+++ b/engine/apps/telegram/tests/test_message_renderer.py
@@ -0,0 +1,208 @@
+import copy
+
+import pytest
+
+from apps.alerts.models import AlertGroupLogRecord, AlertReceiveChannel
+from apps.telegram.renderers.message import MAX_TELEGRAM_MESSAGE_LENGTH, MESSAGE_TRIMMED_TEXT, TelegramMessageRenderer
+
+
+@pytest.mark.django_db
+def test_alert_group_message_too_long_is_trimmed(
+ make_organization, make_alert_receive_channel, make_alert_group, make_alert
+):
+ organization = make_organization()
+ alert_receive_channel = make_alert_receive_channel(
+ organization, integration=AlertReceiveChannel.INTEGRATION_GRAFANA, verbal_name="Test integration"
+ )
+
+ alert_group = make_alert_group(alert_receive_channel)
+
+ payload = copy.deepcopy(alert_receive_channel.config.tests["payload"])
+ payload["labels"]["test"] = "test" * 2000
+
+ make_alert(alert_group=alert_group, raw_request_data=payload)
+
+ renderer = TelegramMessageRenderer(alert_group=alert_group)
+ text = renderer.render_alert_group_message()
+
+ assert len(text) <= MAX_TELEGRAM_MESSAGE_LENGTH
+ assert text.endswith(MESSAGE_TRIMMED_TEXT.format(link=alert_group.web_link))
+
+
+@pytest.mark.django_db
+def test_log_message(
+ make_organization_and_user,
+ make_alert_receive_channel,
+ make_channel_filter,
+ make_escalation_chain,
+ make_alert_group,
+ make_alert,
+ make_alert_group_log_record,
+):
+ organization, user = make_organization_and_user()
+ user_name = user.get_user_verbal_for_team_for_slack(organization)
+
+ alert_receive_channel = make_alert_receive_channel(
+ organization, integration=AlertReceiveChannel.INTEGRATION_GRAFANA, verbal_name="Test integration"
+ )
+ default_channel_filter = make_channel_filter(alert_receive_channel, is_default=True)
+ default_channel_filter.escalation_chain = make_escalation_chain(organization, name="test")
+
+ alert_group = make_alert_group(alert_receive_channel, channel_filter=default_channel_filter)
+ make_alert(alert_group=alert_group, raw_request_data=alert_receive_channel.config.example_payload)
+
+ make_alert_group_log_record(alert_group=alert_group, author=user, type=AlertGroupLogRecord.TYPE_ACK)
+
+ renderer = TelegramMessageRenderer(alert_group=alert_group)
+ text = renderer.render_log_message()
+
+ assert text == f"Incident log:\n0s: acknowledged by {user_name}"
+
+
+@pytest.mark.django_db
+def test_alert_group_message(make_organization, make_alert_receive_channel, make_alert_group, make_alert):
+ organization = make_organization()
+ alert_receive_channel = make_alert_receive_channel(
+ organization, integration=AlertReceiveChannel.INTEGRATION_GRAFANA, verbal_name="Test integration"
+ )
+
+ alert_group = make_alert_group(alert_receive_channel)
+ make_alert(alert_group=alert_group, raw_request_data=alert_receive_channel.config.tests["payload"])
+
+ renderer = TelegramMessageRenderer(alert_group=alert_group)
+ text = renderer.render_alert_group_message()
+
+ assert text == (
+ f"🔴 #{alert_group.inside_organization_number}, {alert_receive_channel.config.tests['telegram']['title']}\n"
+ "Alerting, alerts: 1\n"
+ "Source: Test integration - Grafana\n"
+ f"{alert_group.web_link}\n\n"
+ f"{alert_receive_channel.config.tests['telegram']['message']}"
+ )
+
+
+@pytest.mark.django_db
+def test_log_message_too_long_is_trimmed(
+ make_organization_and_user,
+ make_alert_receive_channel,
+ make_channel_filter,
+ make_alert_group,
+ make_alert,
+ make_alert_group_log_record,
+):
+ organization, user = make_organization_and_user()
+ user_name = user.get_user_verbal_for_team_for_slack(organization)
+
+ alert_receive_channel = make_alert_receive_channel(
+ organization, integration=AlertReceiveChannel.INTEGRATION_GRAFANA, verbal_name="Test integration"
+ )
+ default_channel_filter = make_channel_filter(alert_receive_channel, is_default=True)
+
+ alert_group = make_alert_group(alert_receive_channel, channel_filter=default_channel_filter)
+ make_alert(alert_group=alert_group, raw_request_data=alert_receive_channel.config.example_payload)
+
+ for _ in range(300):
+ make_alert_group_log_record(alert_group=alert_group, author=user, type=AlertGroupLogRecord.TYPE_RESOLVED)
+
+ renderer = TelegramMessageRenderer(alert_group=alert_group)
+ text = renderer.render_log_message()
+
+ assert len(text) <= MAX_TELEGRAM_MESSAGE_LENGTH
+
+ end_text = f"resolved by {user_name}" + MESSAGE_TRIMMED_TEXT.format(link=alert_group.web_link)
+ assert text.endswith(end_text)
+
+
+@pytest.mark.django_db
+def test_actions_message(
+ make_organization,
+ make_alert_receive_channel,
+ make_alert_group,
+):
+ organization = make_organization()
+
+ alert_receive_channel = make_alert_receive_channel(organization)
+ alert_group = make_alert_group(alert_receive_channel)
+
+ renderer = TelegramMessageRenderer(alert_group=alert_group)
+ text = renderer.render_actions_message()
+
+ assert text == "Actions available for this incident"
+
+
+@pytest.mark.django_db
+def test_personal_message(
+ make_organization_and_user,
+ make_alert_receive_channel,
+ make_channel_filter,
+ make_escalation_chain,
+ make_alert_group,
+ make_alert,
+):
+ organization, user = make_organization_and_user()
+ user_name = user.get_user_verbal_for_team_for_slack(organization)
+
+ alert_receive_channel = make_alert_receive_channel(
+ organization, integration=AlertReceiveChannel.INTEGRATION_GRAFANA, verbal_name="Test integration"
+ )
+ default_channel_filter = make_channel_filter(alert_receive_channel, is_default=True)
+ default_channel_filter.escalation_chain = make_escalation_chain(organization, name="test")
+
+ alert_group = make_alert_group(alert_receive_channel, channel_filter=default_channel_filter)
+ make_alert(alert_group=alert_group, raw_request_data=alert_receive_channel.config.tests["payload"])
+
+ alert_group.acknowledge_by_user(user)
+
+ renderer = TelegramMessageRenderer(alert_group=alert_group)
+ text = renderer.render_personal_message()
+
+ assert text == (
+ f"🟠 #{alert_group.inside_organization_number}, {alert_receive_channel.config.tests['telegram']['title']}\n"
+ f"Acknowledged by {user_name}, alerts: 1\n"
+ "Source: Test integration - Grafana\n"
+ f"{alert_group.web_link}\n\n"
+ f"{alert_receive_channel.config.tests['telegram']['message']}\n\n\n"
+ "Incident log:\n"
+ f"0s: acknowledged by {user_name}"
+ )
+
+
+@pytest.mark.django_db
+def test_link_to_channel_message(make_organization, make_alert_receive_channel, make_alert_group, make_alert):
+ organization = make_organization()
+ alert_receive_channel = make_alert_receive_channel(
+ organization, integration=AlertReceiveChannel.INTEGRATION_GRAFANA
+ )
+
+ alert_group = make_alert_group(alert_receive_channel)
+ make_alert(alert_group=alert_group, raw_request_data=alert_receive_channel.config.tests["payload"])
+
+ renderer = TelegramMessageRenderer(alert_group=alert_group)
+ text = renderer.render_link_to_channel_message()
+
+ assert text == (
+ f"👀 You are invited to look at the incident!\n"
+ f"#{alert_group.inside_organization_number}, {alert_receive_channel.config.tests['telegram']['title']} "
+ )
+
+
+@pytest.mark.django_db
+def test_formatting_error_message(
+ make_organization,
+ make_alert_receive_channel,
+ make_alert_group,
+):
+ organization = make_organization()
+ alert_receive_channel = make_alert_receive_channel(
+ organization, integration=AlertReceiveChannel.INTEGRATION_GRAFANA
+ )
+
+ alert_group = make_alert_group(alert_receive_channel)
+
+ renderer = TelegramMessageRenderer(alert_group=alert_group)
+ text = renderer.render_formatting_error_message()
+
+ assert text == (
+ "You have a new incident, but Telegram can't render its content! "
+ f"Please check it out: {alert_group.web_link}"
+ )
diff --git a/engine/apps/telegram/tests/test_models.py b/engine/apps/telegram/tests/test_models.py
new file mode 100644
index 0000000000..c5f962bcb8
--- /dev/null
+++ b/engine/apps/telegram/tests/test_models.py
@@ -0,0 +1,24 @@
+import pytest
+
+from apps.telegram.models.verification.personal import TelegramVerificationCode
+
+
+@pytest.mark.django_db
+def test_user_verification_handler_process_update_another_account_already_linked(
+ make_organization,
+ make_user_for_organization,
+ make_telegram_user_connector,
+ make_telegram_verification_code,
+):
+ organization = make_organization()
+ chat_id = 123
+ user_1 = make_user_for_organization(organization)
+ make_telegram_user_connector(user_1, telegram_chat_id=chat_id)
+
+ user_2 = make_user_for_organization(organization)
+ code = make_telegram_verification_code(user_2)
+ connector, created = TelegramVerificationCode.verify_user(code.uuid, chat_id, "nickname")
+
+ assert created
+ assert connector.telegram_chat_id == chat_id
+ assert connector.user == user_2
diff --git a/engine/apps/telegram/tests/test_update_handlers.py b/engine/apps/telegram/tests/test_update_handlers.py
new file mode 100644
index 0000000000..d74bcb5375
--- /dev/null
+++ b/engine/apps/telegram/tests/test_update_handlers.py
@@ -0,0 +1,123 @@
+from datetime import datetime
+from unittest.mock import patch
+
+import pytest
+from telegram import CallbackQuery, Chat, Message, Update, User
+
+from apps.telegram.client import TelegramClient
+from apps.telegram.renderers.keyboard import Action
+from apps.telegram.updates.update_handlers import ChannelVerificationCodeHandler, StartMessageHandler
+from apps.telegram.updates.update_handlers.button_press import ButtonPressHandler
+from apps.telegram.updates.update_handlers.start_message import START_TEXT
+from apps.telegram.updates.update_handlers.verification.channel import (
+ VERIFICATION_FAILED_DISCUSSION_GROUP_ALREADY_REGISTERED,
+)
+from apps.telegram.utils import CallbackQueryFactory
+
+
+def generate_update(message_text: str) -> Update:
+ user = User(id=0, first_name="Test", is_bot=False)
+ chat = Chat(id=0, type=Chat.PRIVATE)
+ message = Message(message_id=0, text=message_text, chat=chat, from_user=user, date=datetime.now())
+ update = Update(update_id=0, message=message)
+ return update
+
+
+def generate_channel_verification_code_message(verification_code: str, discussion_group_chat_id: str) -> Update:
+ user = User(id=0, first_name="Test", is_bot=False)
+ chat = Chat(id=discussion_group_chat_id, type=Chat.PRIVATE)
+ channel = Chat(id=0, type=Chat.CHANNEL)
+ message = Message(
+ message_id=0,
+ text=verification_code,
+ chat=chat,
+ from_user=user,
+ date=datetime.now(),
+ forward_from_chat=channel,
+ forward_signature="the-signature",
+ )
+ update = Update(update_id=0, message=message)
+ return update
+
+
+def generate_button_press_ack_message(chat_id, alert_group) -> Update:
+ user = User(id=chat_id, first_name="Test", is_bot=False)
+ callback_query = CallbackQuery(
+ id=0,
+ from_user=user,
+ chat_instance=Chat(id=chat_id, type=Chat.PRIVATE),
+ data=CallbackQueryFactory.encode_data(alert_group.pk, Action.ACKNOWLEDGE.value),
+ )
+ update = Update(update_id=0, callback_query=callback_query)
+ return update
+
+
+@pytest.mark.parametrize(
+ "text, matches", (("/start", True), ("start", False), ("/startx", False), ("/start smth", False))
+)
+def test_start_message_handler_matches(text, matches):
+ update = generate_update(message_text=text)
+ handler = StartMessageHandler(update=update)
+ assert handler.matches() is matches
+
+
+@pytest.mark.django_db
+def test_start_message_handler_process_update():
+ update = generate_update(message_text="/start")
+ handler = StartMessageHandler(update=update)
+
+ with patch.object(TelegramClient, "send_raw_message") as mock:
+ handler.process_update()
+ mock.assert_called_with(chat_id=update.message.from_user.id, text=START_TEXT)
+
+
+@pytest.mark.django_db
+def test_channel_verification_handler_process_update_duplicated_discussion_group_id(
+ make_organization, make_telegram_channel
+):
+ organization = make_organization()
+ existing_channel = make_telegram_channel(organization=organization)
+ chat_id = existing_channel.discussion_group_chat_id
+
+ update = generate_channel_verification_code_message(verification_code="123", discussion_group_chat_id=chat_id)
+ handler = ChannelVerificationCodeHandler(update=update)
+
+ with patch.object(TelegramClient, "is_chat_member") as mock_is_member:
+ mock_is_member.return_value = True
+ with patch.object(TelegramClient, "send_raw_message") as mock:
+ handler.process_update()
+ mock.assert_called_with(
+ chat_id=update.message.chat.id,
+ text=VERIFICATION_FAILED_DISCUSSION_GROUP_ALREADY_REGISTERED,
+ reply_to_message_id=update.message.message_id,
+ )
+
+
+@pytest.mark.django_db
+def test_button_press_handler_gets_user(
+ make_organization,
+ make_user_for_organization,
+ make_telegram_user_connector,
+ make_alert_receive_channel,
+ make_alert_group,
+ make_alert,
+):
+ organization = make_organization()
+
+ chat_id = 123
+ user_1 = make_user_for_organization(organization)
+ make_telegram_user_connector(user_1, telegram_chat_id=chat_id)
+ user_2 = make_user_for_organization(organization)
+ make_telegram_user_connector(user_2, telegram_chat_id=chat_id)
+
+ alert_receive_channel = make_alert_receive_channel(organization=organization)
+ alert_group = make_alert_group(alert_receive_channel=alert_receive_channel)
+ make_alert(alert_group, "")
+
+ update = generate_button_press_ack_message(chat_id, alert_group)
+ handler = ButtonPressHandler(update=update)
+ handler.process_update()
+
+ alert_group.refresh_from_db()
+ assert alert_group.acknowledged
+ assert alert_group.acknowledged_by_user == user_2
diff --git a/engine/apps/telegram/updates/__init__.py b/engine/apps/telegram/updates/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/engine/apps/telegram/updates/update_handlers/__init__.py b/engine/apps/telegram/updates/update_handlers/__init__.py
new file mode 100644
index 0000000000..0dcd73d218
--- /dev/null
+++ b/engine/apps/telegram/updates/update_handlers/__init__.py
@@ -0,0 +1,6 @@
+from .update_handler import UpdateHandler # noqa: F401, isort: skip
+from .button_press import ButtonPressHandler # noqa: F401
+from .channel_to_group_forward import ChannelToGroupForwardHandler # noqa: F401
+from .start_message import StartMessageHandler # noqa: F401
+from .verification.channel import ChannelVerificationCodeHandler # noqa: F401
+from .verification.personal import PersonalVerificationCodeHandler # noqa: F401
diff --git a/engine/apps/telegram/updates/update_handlers/button_press.py b/engine/apps/telegram/updates/update_handlers/button_press.py
new file mode 100644
index 0000000000..55a8580e11
--- /dev/null
+++ b/engine/apps/telegram/updates/update_handlers/button_press.py
@@ -0,0 +1,97 @@
+import logging
+from dataclasses import dataclass
+from typing import Callable, Optional, Tuple
+
+from apps.alerts.constants import ActionSource
+from apps.alerts.models import AlertGroup
+from apps.telegram.models import TelegramToUserConnector
+from apps.telegram.renderers.keyboard import Action
+from apps.telegram.updates.update_handlers import UpdateHandler
+from apps.telegram.utils import CallbackQueryFactory
+from apps.user_management.models import User
+from common.constants.role import Role
+
+logger = logging.getLogger(__name__)
+
+PERMISSION_DENIED = """You don't have permission to perform this action!
+Consider connecting your Telegram account on the user settings page ⚙"""
+
+
+@dataclass
+class ActionContext:
+ alert_group: AlertGroup
+ action: Action
+ action_data: str
+
+
+class ButtonPressHandler(UpdateHandler):
+ def matches(self) -> bool:
+ is_callback_query = self.update.callback_query is not None
+ return is_callback_query
+
+ def process_update(self) -> None:
+ data = self.update.callback_query.data
+ action_context = self._get_action_context(data)
+
+ fn, fn_kwargs = self._map_action_context_to_fn(action_context)
+ user = self._get_user(action_context)
+
+ has_permission = self._check_permission(user=user, alert_group=action_context.alert_group)
+
+ if has_permission:
+ fn(user=user, action_source=ActionSource.TELEGRAM, **fn_kwargs)
+ logger.info(f"User {user} triggered '{fn.__name__}'")
+ else:
+ self.update.callback_query.answer(PERMISSION_DENIED, show_alert=True)
+ logger.info(f"User {user} has no permission to trigger '{fn.__name__}'")
+
+ def _get_user(self, action_context: ActionContext) -> Optional[User]:
+ connector = TelegramToUserConnector.objects.filter(
+ telegram_chat_id=self.update.effective_user.id,
+ user__organization=action_context.alert_group.channel.organization,
+ ).last()
+ if connector is not None:
+ return connector.user
+
+ @staticmethod
+ def _check_permission(user: Optional[User], alert_group: AlertGroup) -> bool:
+ if not user:
+ return False
+
+ return user.organization == alert_group.channel.organization and user.role in [Role.ADMIN, Role.EDITOR]
+
+ @staticmethod
+ def _get_action_context(data: str) -> ActionContext:
+ args = CallbackQueryFactory.decode_data(data)
+
+ alert_group_pk = args[0]
+ alert_group = AlertGroup.all_objects.get(pk=alert_group_pk)
+
+ action_name = args[1]
+ action = Action(action_name)
+
+ action_data = args[2] if len(args) >= 3 else None
+
+ return ActionContext(alert_group=alert_group, action=action, action_data=action_data)
+
+ @staticmethod
+ def _map_action_context_to_fn(action_context: ActionContext) -> Tuple[Callable, dict]:
+ action_to_fn = {
+ Action.RESOLVE: "resolve_by_user",
+ Action.UNRESOLVE: "un_resolve_by_user",
+ Action.ACKNOWLEDGE: "acknowledge_by_user",
+ Action.UNACKNOWLEDGE: "un_acknowledge_by_user",
+ Action.SILENCE: {
+ "fn_name": "silence_by_user",
+ "kwargs": {"silence_delay": int(action_context.action_data) if action_context.action_data else None},
+ },
+ Action.UNSILENCE: "un_silence_by_user",
+ }
+
+ fn_info = action_to_fn[action_context.action]
+ fn_name = fn_info["fn_name"] if isinstance(fn_info, dict) else fn_info
+ fn_kwargs = fn_info["kwargs"] if isinstance(fn_info, dict) else {}
+
+ fn = getattr(action_context.alert_group, fn_name)
+
+ return fn, fn_kwargs
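_get_action_context reverses the encoding used by the keyboard renderer: the callback data splits on ":" into an alert group pk, an action name, and optional action data. A standalone sketch with a hypothetical payload (the database lookup is replaced by the raw pk):

    from enum import Enum

    class Action(Enum):
        ACKNOWLEDGE = "acknowledge"
        SILENCE = "silence"

    data = "123:silence:3600"    # hypothetical callback payload
    args = data.split(":")       # CallbackQueryFactory.decode_data
    alert_group_pk, action = args[0], Action(args[1])
    action_data = args[2] if len(args) >= 3 else None

    assert alert_group_pk == "123"
    assert action is Action.SILENCE
    assert action_data == "3600"  # ends up as silence_by_user(silence_delay=3600)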
diff --git a/engine/apps/telegram/updates/update_handlers/channel_to_group_forward.py b/engine/apps/telegram/updates/update_handlers/channel_to_group_forward.py
new file mode 100644
index 0000000000..c8e2f05bc9
--- /dev/null
+++ b/engine/apps/telegram/updates/update_handlers/channel_to_group_forward.py
@@ -0,0 +1,81 @@
+import logging
+import re
+
+from apps.telegram.client import TelegramClient
+from apps.telegram.models import TelegramToOrganizationConnector
+from apps.telegram.tasks import send_log_and_actions_message
+from apps.telegram.updates.update_handlers import UpdateHandler
+from apps.telegram.updates.update_handlers.verification.channel import (
+ CHANNEL_CONNECTED_TEXT,
+ RELINK_CHANNEL_TEXT,
+ WRONG_VERIFICATION_CODE,
+)
+from apps.telegram.utils import is_verification_message
+
+logger = logging.getLogger(__name__)
+
+TELEGRAM_ID = 777000
+SIGN_MESSAGES_NOT_ENABLED = """Please enable "Sign messages" in the channel settings!
+Otherwise the Grafana OnCall bot will not be able to operate properly!"""
+
+
+class ChannelToGroupForwardHandler(UpdateHandler):
+ def matches(self) -> bool:
+ is_message = self.update.message is not None and self.update.message.text is not None
+
+ if not is_message:
+ return False
+
+ is_from_discussion_group = self.update.message.chat.type == "supergroup"
+ is_forwarded_by_telegram = self.update.effective_user.id == TELEGRAM_ID
+
+ # Make sure that only alert group messages are processed with this handler
+ is_verification_successful_message = bool(
+ re.match(CHANNEL_CONNECTED_TEXT.format(organization_title=".*"), self.update.message.text_html)
+ )
+ is_relink_channel_message = bool(
+ re.match(RELINK_CHANNEL_TEXT.format(organization_title=".*"), self.update.message.text_html)
+ )
+ is_verification_failed_message = self.update.message.text == WRONG_VERIFICATION_CODE
+
+ return (
+ is_from_discussion_group
+ and is_forwarded_by_telegram
+ and not is_verification_message(self.update.message.text)
+ and not (is_verification_successful_message or is_relink_channel_message or is_verification_failed_message)
+ )
+
+ def process_update(self) -> None:
+ telegram_client = TelegramClient()
+
+ if self.update.message.forward_signature is None:
+ telegram_client.send_raw_message(
+ chat_id=self.update.message.chat.id,
+ text=SIGN_MESSAGES_NOT_ENABLED,
+ reply_to_message_id=self.update.message.message_id,
+ )
+ return
+
+ channel_chat_id = self.update.message.forward_from_chat.id
+ channel_message_id = self.update.message.forward_from_message_id
+ group_message_id = self.update.message.message_id
+
+ if self.update.message.forward_signature != telegram_client.api_client.first_name:
+ return
+
+ try:
+ connector = TelegramToOrganizationConnector.objects.get(channel_chat_id=channel_chat_id)
+ send_log_and_actions_message.delay(
+ channel_chat_id=connector.channel_chat_id,
+ group_chat_id=connector.discussion_group_chat_id,
+ channel_message_id=channel_message_id,
+ reply_to_message_id=group_message_id,
+ )
+
+ except TelegramToOrganizationConnector.DoesNotExist:
+ logger.warning(
+                f"Tried to send the log and actions messages to the discussion group, but the organization deleted the channel connector. "
+ f"Channel chat id: {channel_chat_id}. "
+ f"Channel message id: {channel_message_id}. "
+ f"Group message id: {group_message_id}."
+ )
diff --git a/engine/apps/telegram/updates/update_handlers/start_message.py b/engine/apps/telegram/updates/update_handlers/start_message.py
new file mode 100644
index 0000000000..f70ead7d6c
--- /dev/null
+++ b/engine/apps/telegram/updates/update_handlers/start_message.py
@@ -0,0 +1,35 @@
+from apps.telegram.client import TelegramClient
+from apps.telegram.models import TelegramToUserConnector
+from apps.telegram.updates.update_handlers.update_handler import UpdateHandler
+
+START_TEXT = """Hi!
+This is the Grafana OnCall notification bot. You can connect your Grafana OnCall account to Telegram on the user settings page.
+"""
+
+START_TEXT_FOR_CONNECTED_USER = """Hi!
+This is the Grafana OnCall notification bot. Your Telegram account is connected to user {username}.
+"""
+
+
+class StartMessageHandler(UpdateHandler):
+ def matches(self) -> bool:
+ is_message = self.update.message is not None and self.update.message.text is not None
+
+ if not is_message:
+ return False
+
+ is_from_private_chat = self.update.message.chat.type == "private"
+ is_start_message = self.update.message.text == "/start"
+
+ return is_from_private_chat and is_start_message
+
+ def process_update(self) -> None:
+ connector = TelegramToUserConnector.objects.filter(telegram_chat_id=self.update.effective_user.id).first()
+ telegram_client = TelegramClient()
+
+ if connector is not None:
+ user = connector.user
+ text = START_TEXT_FOR_CONNECTED_USER.format(username=user.username)
+ telegram_client.send_raw_message(chat_id=self.update.effective_user.id, text=text)
+ else:
+ telegram_client.send_raw_message(chat_id=self.update.effective_user.id, text=START_TEXT)
diff --git a/engine/apps/telegram/updates/update_handlers/update_handler.py b/engine/apps/telegram/updates/update_handlers/update_handler.py
new file mode 100644
index 0000000000..eca2a25805
--- /dev/null
+++ b/engine/apps/telegram/updates/update_handlers/update_handler.py
@@ -0,0 +1,21 @@
+from abc import ABC, abstractmethod
+
+from telegram import Update
+
+
+class UpdateHandler(ABC):
+ """
+    Base handler for a single Telegram update.
+    After creating a new handler by subclassing this abstract class, make sure to add it to __init__.py.
+ """
+
+ def __init__(self, update: Update):
+ self.update = update
+
+ @abstractmethod
+ def matches(self) -> bool:
+ pass
+
+ @abstractmethod
+ def process_update(self) -> None:
+ pass
diff --git a/engine/apps/telegram/updates/update_handlers/verification/__init__.py b/engine/apps/telegram/updates/update_handlers/verification/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/engine/apps/telegram/updates/update_handlers/verification/channel.py b/engine/apps/telegram/updates/update_handlers/verification/channel.py
new file mode 100644
index 0000000000..edd9f787bb
--- /dev/null
+++ b/engine/apps/telegram/updates/update_handlers/verification/channel.py
@@ -0,0 +1,91 @@
+from apps.telegram.client import TelegramClient
+from apps.telegram.models import TelegramChannelVerificationCode, TelegramToOrganizationConnector
+from apps.telegram.updates.update_handlers import UpdateHandler
+from apps.telegram.utils import is_verification_message
+
+TELEGRAM_ID = 777000
+
+VERIFICATION_FAILED_BOT_NOT_IN_CHANNEL = """Verification failed!
+Please add the Grafana OnCall bot to the "{channel_name}" channel as an admin and allow it to post messages."""
+VERIFICATION_FAILED_SIGN_MESSAGES_NOT_ENABLED = """Verification failed!
+Please enable "Sign messages" in the channel settings, otherwise the Grafana OnCall bot will not be able to operate properly."""
+VERIFICATION_FAILED_DISCUSSION_GROUP_ALREADY_REGISTERED = """Verification failed!
+The associated discussion group has already been registered with a different channel."""
+
+CHANNEL_CONNECTED_TEXT = "Done! This channel is now linked to organization {organization_title} 🎉 "
+RELINK_CHANNEL_TEXT = """This Telegram channel is already connected to organization {organization_title}.
+Please unlink the Telegram channel in the settings of organization {organization_title} or contact Grafana OnCall support."""
+WRONG_VERIFICATION_CODE = "Verification failed: wrong verification code"
+
+
+class ChannelVerificationCodeHandler(UpdateHandler):
+ def matches(self) -> bool:
+ is_message = self.update.message is not None and self.update.message.text is not None
+
+ if not is_message:
+ return False
+
+ is_from_discussion_group = self.update.message.chat.type == "supergroup"
+ is_forwarded_by_telegram = self.update.effective_user.id == TELEGRAM_ID
+
+ return (
+ is_verification_message(self.update.message.text) and is_from_discussion_group and is_forwarded_by_telegram
+ )
+
+ def process_update(self) -> None:
+ telegram_client = TelegramClient()
+
+ channel_chat_id = self.update.message.forward_from_chat.id
+ channel_name = self.update.message.forward_from_chat.title
+ discussion_group_chat_id = self.update.message.chat.id
+ discussion_group_name = self.update.message.chat.title
+ verification_code = self.update.message.text
+
+ # check if bot is in channel
+ if not telegram_client.is_chat_member(chat_id=channel_chat_id):
+ telegram_client.send_raw_message(
+ chat_id=self.update.message.chat.id,
+ text=VERIFICATION_FAILED_BOT_NOT_IN_CHANNEL.format(channel_name=channel_name),
+ reply_to_message_id=self.update.message.message_id,
+ )
+ return
+
+ # check if "Sign messages" is enabled
+ if self.update.message.forward_signature is None:
+ telegram_client.send_raw_message(
+ chat_id=self.update.message.chat.id,
+ text=VERIFICATION_FAILED_SIGN_MESSAGES_NOT_ENABLED,
+ reply_to_message_id=self.update.message.message_id,
+ )
+ return
+
+ # check discussion group chat is not reused
+ connector = TelegramToOrganizationConnector.objects.filter(
+ discussion_group_chat_id=discussion_group_chat_id
+ ).first()
+ if connector is not None and connector.channel_chat_id != channel_chat_id:
+ # discussion group is already connected to a different channel chat
+ telegram_client.send_raw_message(
+ chat_id=self.update.message.chat.id,
+ text=VERIFICATION_FAILED_DISCUSSION_GROUP_ALREADY_REGISTERED,
+ reply_to_message_id=self.update.message.message_id,
+ )
+ return
+
+ connector, created = TelegramChannelVerificationCode.verify_channel_and_discussion_group(
+ uuid_code=verification_code,
+ channel_chat_id=channel_chat_id,
+ channel_name=channel_name,
+ discussion_group_chat_id=discussion_group_chat_id,
+ discussion_group_name=discussion_group_name,
+ )
+
+ if created:
+ reply_text = CHANNEL_CONNECTED_TEXT.format(organization_title=connector.organization.org_title)
+ else:
+ if connector is not None:
+ reply_text = RELINK_CHANNEL_TEXT.format(organization_title=connector.organization.org_title)
+ else:
+ reply_text = WRONG_VERIFICATION_CODE
+
+ telegram_client.send_raw_message(chat_id=channel_chat_id, text=reply_text)
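The reply after a verification attempt is derived from the (connector, created) pair returned by verify_channel_and_discussion_group; a condensed sketch of the three outcomes above:

    def pick_reply(connector, created: bool) -> str:
        # mirrors the branching at the end of process_update
        if created:
            return "CHANNEL_CONNECTED_TEXT"   # a new connector was registered
        if connector is not None:
            return "RELINK_CHANNEL_TEXT"      # channel is already linked to an organization
        return "WRONG_VERIFICATION_CODE"      # bad or expired code

    assert pick_reply(object(), True) == "CHANNEL_CONNECTED_TEXT"
    assert pick_reply(object(), False) == "RELINK_CHANNEL_TEXT"
    assert pick_reply(None, False) == "WRONG_VERIFICATION_CODE"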
diff --git a/engine/apps/telegram/updates/update_handlers/verification/personal.py b/engine/apps/telegram/updates/update_handlers/verification/personal.py
new file mode 100644
index 0000000000..f765ca4281
--- /dev/null
+++ b/engine/apps/telegram/updates/update_handlers/verification/personal.py
@@ -0,0 +1,48 @@
+from apps.telegram.client import TelegramClient
+from apps.telegram.models import TelegramVerificationCode
+from apps.telegram.updates.update_handlers import UpdateHandler
+from apps.telegram.utils import is_verification_message
+
+USER_CONNECTED_TEXT = "Done! This Telegram account is now linked to {username} 🎉"
+RELINK_ACCOUNT_TEXT = """This Telegram account is already connected to Grafana OnCall user {username}.
+Please unlink the Telegram account in the profile settings of user {username} or contact Grafana OnCall support."""
+WRONG_VERIFICATION_CODE = "Verification failed: wrong verification code"
+
+
+class PersonalVerificationCodeHandler(UpdateHandler):
+ def matches(self) -> bool:
+ is_message = self.update.message is not None and self.update.message.text is not None
+
+ if not is_message:
+ return False
+
+ is_from_private_chat = self.update.message.chat.type == "private"
+
+ split_entries = self.update.message.text.split()
+ is_deeplink_start = (
+ len(split_entries) == 2 and split_entries[0] == "/start" and is_verification_message(split_entries[1])
+ )
+
+ return is_from_private_chat and (is_deeplink_start or is_verification_message(self.update.message.text))
+
+ def process_update(self) -> None:
+ user = self.update.effective_user
+ nickname = user.username or user.first_name or user.last_name or "Unknown"
+
+ text = self.update.message.text
+ verification_code = text if is_verification_message(text) else text.split()[1]
+
+ connector, created = TelegramVerificationCode.verify_user(
+ uuid_code=verification_code, telegram_chat_id=user.id, telegram_nick_name=nickname
+ )
+
+ if created:
+ reply_text = USER_CONNECTED_TEXT.format(username=connector.user.username)
+ else:
+ if connector is not None:
+ reply_text = RELINK_ACCOUNT_TEXT.format(username=connector.user.username)
+ else:
+ reply_text = WRONG_VERIFICATION_CODE
+
+ telegram_client = TelegramClient()
+ telegram_client.send_raw_message(chat_id=user.id, text=reply_text)
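PersonalVerificationCodeHandler accepts either a bare verification code or the deep-link form "/start <code>". A standalone sketch of both checks, with a hypothetical code value:

    import re

    UUID4_REGEX = "^[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$"

    def is_verification_message(text: str) -> bool:
        # mirrors apps.telegram.utils.is_verification_message
        return bool(re.match(UUID4_REGEX, text))

    code = "0f8fad5b-d9cb-469f-a165-70867728950e"  # hypothetical verification code
    assert is_verification_message(code)           # bare code pasted into the chat

    entries = f"/start {code}".split()             # deep-link start message
    assert len(entries) == 2 and entries[0] == "/start" and is_verification_message(entries[1])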
diff --git a/engine/apps/telegram/updates/update_manager.py b/engine/apps/telegram/updates/update_manager.py
new file mode 100644
index 0000000000..4383842ddd
--- /dev/null
+++ b/engine/apps/telegram/updates/update_manager.py
@@ -0,0 +1,76 @@
+import logging
+from typing import Optional
+
+from rest_framework.request import Request
+from telegram import Bot, Update
+
+from apps.base.utils import live_settings
+from apps.telegram.models import TelegramToOrganizationConnector, TelegramToUserConnector
+from apps.telegram.updates.update_handlers.update_handler import UpdateHandler
+
+logger = logging.getLogger(__name__)
+
+TELEGRAM_ID = 777000
+
+
+class UpdateManager:
+ """
+    Manager for Telegram updates.
+    It selects the appropriate UpdateHandler and has the selected handler process the update.
+    It also updates user, channel and group names on every update to keep the names in the database in sync.
+ """
+
+ @classmethod
+ def process_update(cls, update: Update) -> None:
+ cls._update_entity_names(update)
+
+ handler = cls.select_update_handler(update)
+ if handler is None:
+ logger.info("No update handlers applied for update")
+ return
+
+ logger.info(f"Processing update with handler: {handler.__class__.__name__}")
+ handler.process_update()
+
+ @staticmethod
+ def select_update_handler(update: Update) -> Optional[UpdateHandler]:
+ handler_classes = UpdateHandler.__subclasses__()
+ for handler_class in handler_classes:
+ handler = handler_class(update)
+ if handler.matches():
+ return handler
+
+ @classmethod
+ def process_request(cls, request: Request) -> None:
+ update = Update.de_json(request.data, bot=Bot(live_settings.TELEGRAM_TOKEN))
+ logger.info(f"Update from Telegram: {update}")
+ cls.process_update(update)
+
+ @classmethod
+ def _update_entity_names(cls, update: Update) -> None:
+ if update.effective_user is None:
+ return
+
+ if update.effective_user.id == TELEGRAM_ID:
+ cls._update_channel_and_group_names(update)
+ else:
+ cls._update_user_names(update)
+
+ @staticmethod
+ def _update_channel_and_group_names(update: Update) -> None:
+ channel_chat_id = update.message.forward_from_chat.id
+ channel_name = update.message.forward_from_chat.title
+
+ discussion_group_chat_id = update.message.chat.id
+ discussion_group_name = update.message.chat.title
+
+ TelegramToOrganizationConnector.objects.filter(
+ channel_chat_id=channel_chat_id, discussion_group_chat_id=discussion_group_chat_id
+ ).update(channel_name=channel_name, discussion_group_name=discussion_group_name)
+
+ @staticmethod
+ def _update_user_names(update: Update) -> None:
+ user = update.effective_user
+ telegram_nick_name = user.username or user.first_name or user.last_name or "Unknown"
+
+ TelegramToUserConnector.objects.filter(telegram_chat_id=user.id).update(telegram_nick_name=telegram_nick_name)
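select_update_handler relies on UpdateHandler.__subclasses__(), which is why every handler must be imported in update_handlers/__init__.py: a subclass is only discoverable once its module has been imported. A self-contained sketch of the dispatch pattern:

    from abc import ABC, abstractmethod
    from typing import Optional

    class Handler(ABC):
        def __init__(self, update):
            self.update = update

        @abstractmethod
        def matches(self) -> bool:
            ...

    class StartHandler(Handler):
        def matches(self) -> bool:
            return self.update == "/start"

    def select_handler(update) -> Optional[Handler]:
        # mirrors UpdateManager.select_update_handler: the first matching handler wins
        for handler_class in Handler.__subclasses__():
            handler = handler_class(update)
            if handler.matches():
                return handler
        return None

    assert isinstance(select_handler("/start"), StartHandler)
    assert select_handler("hello") is None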
diff --git a/engine/apps/telegram/urls.py b/engine/apps/telegram/urls.py
new file mode 100644
index 0000000000..a1bb29e6da
--- /dev/null
+++ b/engine/apps/telegram/urls.py
@@ -0,0 +1,7 @@
+from django.urls import path
+
+from .views import WebHookView
+
+urlpatterns = [
+ path("", WebHookView.as_view()),
+]
diff --git a/engine/apps/telegram/utils.py b/engine/apps/telegram/utils.py
new file mode 100644
index 0000000000..9568666efd
--- /dev/null
+++ b/engine/apps/telegram/utils.py
@@ -0,0 +1,20 @@
+import re
+from typing import List, Union
+
+UUID4_REGEX = "^[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$"
+
+
+def is_verification_message(text: str) -> bool:
+ return bool(re.match(UUID4_REGEX, text))
+
+
+class CallbackQueryFactory:
+ SEPARATOR = ":"
+
+ @classmethod
+ def encode_data(cls, *args: Union[str, int]) -> str:
+ return cls.SEPARATOR.join(map(str, args))
+
+ @classmethod
+ def decode_data(cls, data: str) -> List[str]:
+ return data.split(cls.SEPARATOR)
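+
+
+# Usage sketch (values illustrative):
+#     CallbackQueryFactory.encode_data("alert_group", 42, "acknowledge")
+#     # -> "alert_group:42:acknowledge"
+#     CallbackQueryFactory.decode_data("alert_group:42:acknowledge")
+#     # -> ["alert_group", "42", "acknowledge"]
+# Note the asymmetry: ints are stringified on encode and stay strings on decode.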
diff --git a/engine/apps/telegram/views.py b/engine/apps/telegram/views.py
new file mode 100644
index 0000000000..7674071177
--- /dev/null
+++ b/engine/apps/telegram/views.py
@@ -0,0 +1,16 @@
+from rest_framework.response import Response
+from rest_framework.views import APIView
+
+from apps.telegram.tasks import register_telegram_webhook
+from apps.telegram.updates.update_manager import UpdateManager
+
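+# Queued at import time so the Telegram webhook is (re)registered on startup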
+register_telegram_webhook.delay()
+
+
+class WebHookView(APIView):
+ def get(self, request, format=None):
+ return Response("hello")
+
+ def post(self, request):
+ UpdateManager.process_request(request)
+ return Response(status=200)
diff --git a/engine/apps/twilioapp/__init__.py b/engine/apps/twilioapp/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/engine/apps/twilioapp/admin.py b/engine/apps/twilioapp/admin.py
new file mode 100644
index 0000000000..c769ff5c0c
--- /dev/null
+++ b/engine/apps/twilioapp/admin.py
@@ -0,0 +1,17 @@
+from django.contrib import admin
+
+from common.admin import CustomModelAdmin
+
+from .models import SMSMessage, TwilioLogRecord
+
+
+@admin.register(TwilioLogRecord)
+class TwilioLogRecordAdmin(CustomModelAdmin):
+ list_display = ("id", "user", "phone_number", "type", "status", "succeed", "created_at")
+ list_filter = ("created_at", "type", "status", "succeed")
+
+
+@admin.register(SMSMessage)
+class SMSMessageAdmin(CustomModelAdmin):
+ list_display = ("id", "receiver", "represents_alert_group", "notification_policy", "created_at")
+ list_filter = ("created_at",)
diff --git a/engine/apps/twilioapp/constants.py b/engine/apps/twilioapp/constants.py
new file mode 100644
index 0000000000..5785077e43
--- /dev/null
+++ b/engine/apps/twilioapp/constants.py
@@ -0,0 +1,108 @@
+class TwilioMessageStatuses(object):
+ """
+ https://www.twilio.com/docs/sms/tutorials/how-to-confirm-delivery-python?code-sample=code-handle-a-sms-statuscallback&code-language=Python&code-sdk-version=5.x#receive-status-events-in-your-web-application
+ https://www.twilio.com/docs/sms/api/message-resource#message-status-values
+ """
+
+ ACCEPTED = 10
+ QUEUED = 20
+ SENDING = 30
+ SENT = 40
+ FAILED = 50
+ DELIVERED = 60
+ UNDELIVERED = 70
+ RECEIVING = 80
+ RECEIVED = 90
+ READ = 100
+
+ CHOICES = (
+ (ACCEPTED, "accepted"),
+ (QUEUED, "queued"),
+ (SENDING, "sending"),
+ (SENT, "sent"),
+ (FAILED, "failed"),
+ (DELIVERED, "delivered"),
+ (UNDELIVERED, "undelivered"),
+ (RECEIVING, "receiving"),
+ (RECEIVED, "received"),
+ (READ, "read"),
+ )
+
+ DETERMINANT = {
+ "accepted": ACCEPTED,
+ "queued": QUEUED,
+ "sending": SENDING,
+ "sent": SENT,
+ "failed": FAILED,
+ "delivered": DELIVERED,
+ "undelivered": UNDELIVERED,
+ "receiving": RECEIVING,
+ "received": RECEIVED,
+ "read": READ,
+ }
+
+
+class TwilioCallStatuses(object):
+ """
+ https://www.twilio.com/docs/voice/twiml#callstatus-values
+ """
+
+ QUEUED = 10
+ RINGING = 20
+ IN_PROGRESS = 30
+ COMPLETED = 40
+ BUSY = 50
+ FAILED = 60
+ NO_ANSWER = 70
+ CANCELED = 80
+
+ CHOICES = (
+ (QUEUED, "queued"),
+ (RINGING, "ringing"),
+ (IN_PROGRESS, "in-progress"),
+ (COMPLETED, "completed"),
+ (BUSY, "busy"),
+ (FAILED, "failed"),
+ (NO_ANSWER, "no-answer"),
+ (CANCELED, "canceled"),
+ )
+
+ DETERMINANT = {
+ "queued": QUEUED,
+ "ringing": RINGING,
+ "in-progress": IN_PROGRESS,
+ "completed": COMPLETED,
+ "busy": BUSY,
+ "failed": FAILED,
+ "no-answer": NO_ANSWER,
+ "canceled": CANCELED,
+ }
+
+
+class TwilioLogRecordType(object):
+ VERIFICATION_START = 10
+ VERIFICATION_CHECK = 20
+
+ CHOICES = ((VERIFICATION_START, "verification start"), (VERIFICATION_CHECK, "verification check"))
+
+
+class TwilioLogRecordStatus(object):
+    # Verification start and verification check use the same set of statuses:
+ # https://www.twilio.com/docs/verify/api/verification#verification-response-properties
+ # https://www.twilio.com/docs/verify/api/verification-check
+
+ PENDING = 10
+ APPROVED = 20
+ DENIED = 30
+    # Custom status recorded when a TwilioException is raised
+ ERROR = 40
+
+ CHOICES = ((PENDING, "pending"), (APPROVED, "approved"), (DENIED, "denied"), (ERROR, "error"))
+
+ DETERMINANT = {"pending": PENDING, "approved": APPROVED, "denied": DENIED, "error": ERROR}
+
+
+TEST_CALL_TEXT = (
+ "You are invited to check an incident from Grafana OnCall. "
+ "Alert via {channel_name} with title {alert_group_name} triggered {alerts_count} times"
+)
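+
+# Usage sketch: DETERMINANT maps raw Twilio callback statuses to the integers
+# stored on the models, e.g.
+#     TwilioCallStatuses.DETERMINANT["no-answer"]  # -> TwilioCallStatuses.NO_ANSWER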
diff --git a/engine/apps/twilioapp/migrations/0001_squashed_initial.py b/engine/apps/twilioapp/migrations/0001_squashed_initial.py
new file mode 100644
index 0000000000..cd76ebdfb2
--- /dev/null
+++ b/engine/apps/twilioapp/migrations/0001_squashed_initial.py
@@ -0,0 +1,60 @@
+# Generated by Django 3.2.5 on 2022-05-31 14:46
+
+from django.db import migrations, models
+import django.db.models.deletion
+
+
+class Migration(migrations.Migration):
+
+ initial = True
+
+ dependencies = [
+ ('user_management', '0001_squashed_initial'),
+ ('base', '0002_squashed_initial'),
+ ('alerts', '0001_squashed_initial'),
+ ]
+
+ operations = [
+ migrations.CreateModel(
+ name='TwilioLogRecord',
+ fields=[
+ ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
+ ('phone_number', models.CharField(max_length=16)),
+ ('type', models.PositiveSmallIntegerField(choices=[(10, 'verification start'), (20, 'verification check')], default=10)),
+ ('status', models.PositiveSmallIntegerField(choices=[(10, 'pending'), (20, 'approved'), (30, 'denied'), (40, 'error')], default=10)),
+ ('payload', models.TextField(default=None, null=True)),
+ ('error_message', models.TextField(default=None, null=True)),
+ ('succeed', models.BooleanField(default=False)),
+ ('created_at', models.DateTimeField(auto_now_add=True)),
+ ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='user_management.user')),
+ ],
+ ),
+ migrations.CreateModel(
+ name='SMSMessage',
+ fields=[
+ ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
+ ('exceeded_limit', models.BooleanField(default=None, null=True)),
+ ('status', models.PositiveSmallIntegerField(blank=True, choices=[(10, 'accepted'), (20, 'queued'), (30, 'sending'), (40, 'sent'), (50, 'failed'), (60, 'delivered'), (70, 'undelivered'), (80, 'receiving'), (90, 'received'), (100, 'read')], null=True)),
+ ('sid', models.CharField(blank=True, max_length=50)),
+ ('created_at', models.DateTimeField(auto_now_add=True)),
+ ('notification_policy', models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.SET_NULL, to='base.usernotificationpolicy')),
+ ('receiver', models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='user_management.user')),
+ ('represents_alert', models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.SET_NULL, to='alerts.alert')),
+ ('represents_alert_group', models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.SET_NULL, to='alerts.alertgroup')),
+ ],
+ ),
+ migrations.CreateModel(
+ name='PhoneCall',
+ fields=[
+ ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
+ ('exceeded_limit', models.BooleanField(default=None, null=True)),
+ ('status', models.PositiveSmallIntegerField(blank=True, choices=[(10, 'queued'), (20, 'ringing'), (30, 'in-progress'), (40, 'completed'), (50, 'busy'), (60, 'failed'), (70, 'no-answer'), (80, 'canceled')], null=True)),
+ ('sid', models.CharField(blank=True, max_length=50)),
+ ('created_at', models.DateTimeField(auto_now_add=True)),
+ ('notification_policy', models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.SET_NULL, to='base.usernotificationpolicy')),
+ ('receiver', models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='user_management.user')),
+ ('represents_alert', models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.SET_NULL, to='alerts.alert')),
+ ('represents_alert_group', models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.SET_NULL, to='alerts.alertgroup')),
+ ],
+ ),
+ ]
diff --git a/engine/apps/twilioapp/migrations/__init__.py b/engine/apps/twilioapp/migrations/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/engine/apps/twilioapp/models/__init__.py b/engine/apps/twilioapp/models/__init__.py
new file mode 100644
index 0000000000..b3d32d81c6
--- /dev/null
+++ b/engine/apps/twilioapp/models/__init__.py
@@ -0,0 +1,3 @@
+from .phone_call import PhoneCall # noqa: F401
+from .sms_message import SMSMessage # noqa: F401
+from .twilio_log_record import TwilioLogRecord # noqa: F401
diff --git a/engine/apps/twilioapp/models/phone_call.py b/engine/apps/twilioapp/models/phone_call.py
new file mode 100644
index 0000000000..7d5ae0f9c7
--- /dev/null
+++ b/engine/apps/twilioapp/models/phone_call.py
@@ -0,0 +1,216 @@
+import logging
+
+from django.apps import apps
+from django.db import models
+from twilio.base.exceptions import TwilioRestException
+
+from apps.alerts.constants import ActionSource
+from apps.alerts.incident_appearance.renderers.phone_call_renderer import AlertGroupPhoneCallRenderer
+from apps.alerts.signals import user_notification_action_triggered_signal
+from apps.twilioapp.constants import TwilioCallStatuses
+from apps.twilioapp.twilio_client import twilio_client
+
+logger = logging.getLogger(__name__)
+
+
+class PhoneCallManager(models.Manager):
+ def update_status(self, call_sid, call_status):
+        """Update the status of the PhoneCall with the given call_sid, if it exists.
+
+        Args:
+            call_sid (str): sid of the Twilio call
+            call_status (str): new status reported by Twilio
+        """
+ UserNotificationPolicyLogRecord = apps.get_model("base", "UserNotificationPolicyLogRecord")
+
+ if call_sid and call_status:
+ phone_call_qs = self.filter(sid=call_sid)
+
+ status = TwilioCallStatuses.DETERMINANT.get(call_status)
+
+ if phone_call_qs.exists() and status:
+ phone_call_qs.update(status=status)
+
+ phone_call = phone_call_qs.first()
+ log_record = None
+ if status == TwilioCallStatuses.COMPLETED:
+ log_record = UserNotificationPolicyLogRecord(
+ author=phone_call.receiver,
+ type=UserNotificationPolicyLogRecord.TYPE_PERSONAL_NOTIFICATION_SUCCESS,
+ notification_policy=phone_call.notification_policy,
+ alert_group=phone_call.represents_alert_group,
+ notification_step=phone_call.notification_policy.step
+ if phone_call.notification_policy
+ else None,
+ notification_channel=phone_call.notification_policy.notify_by
+ if phone_call.notification_policy
+ else None,
+ )
+ elif status in [TwilioCallStatuses.FAILED, TwilioCallStatuses.BUSY, TwilioCallStatuses.NO_ANSWER]:
+ log_record = UserNotificationPolicyLogRecord(
+ author=phone_call.receiver,
+ type=UserNotificationPolicyLogRecord.TYPE_PERSONAL_NOTIFICATION_FAILED,
+ notification_policy=phone_call.notification_policy,
+ alert_group=phone_call.represents_alert_group,
+ notification_error_code=PhoneCall.get_error_code_by_twilio_status(status),
+ notification_step=phone_call.notification_policy.step
+ if phone_call.notification_policy
+ else None,
+ notification_channel=phone_call.notification_policy.notify_by
+ if phone_call.notification_policy
+ else None,
+ )
+
+ if log_record is not None:
+ log_record.save()
+ user_notification_action_triggered_signal.send(
+ sender=PhoneCall.objects.update_status, log_record=log_record
+ )
+
+ def get_and_process_digit(self, call_sid, digit):
+        """Fetch the PhoneCall with the given call_sid and process the pressed digit.
+
+        Args:
+            call_sid (str): sid of the Twilio call
+            digit (str): digit pressed by the user
+        """
+ if call_sid and digit:
+ phone_call = self.filter(sid=call_sid).first()
+
+ if phone_call:
+ phone_call.process_digit(digit=digit)
+
+
+class PhoneCall(models.Model):
+
+ objects = PhoneCallManager()
+
+ exceeded_limit = models.BooleanField(null=True, default=None)
+ represents_alert = models.ForeignKey("alerts.Alert", on_delete=models.SET_NULL, null=True, default=None)
+ represents_alert_group = models.ForeignKey("alerts.AlertGroup", on_delete=models.SET_NULL, null=True, default=None)
+ notification_policy = models.ForeignKey(
+ "base.UserNotificationPolicy", on_delete=models.SET_NULL, null=True, default=None
+ )
+
+ receiver = models.ForeignKey("user_management.User", on_delete=models.CASCADE, null=True, default=None)
+
+ status = models.PositiveSmallIntegerField(
+ blank=True,
+ null=True,
+ choices=TwilioCallStatuses.CHOICES,
+ )
+
+ sid = models.CharField(
+ blank=True,
+ max_length=50,
+ )
+
+ created_at = models.DateTimeField(auto_now_add=True)
+
+ def process_digit(self, digit):
+        """Process the digit the user pressed during the call:
+        1 acknowledges, 2 resolves, 3 silences the alert group for 30 minutes.
+
+        Args:
+            digit (str): digit pressed by the user
+        """
+ alert_group = self.represents_alert_group
+
+ if digit == "1":
+ alert_group.acknowledge_by_user(self.receiver, action_source=ActionSource.TWILIO)
+ elif digit == "2":
+ alert_group.resolve_by_user(self.receiver, action_source=ActionSource.TWILIO)
+ elif digit == "3":
+ alert_group.silence_by_user(self.receiver, silence_delay=1800, action_source=ActionSource.TWILIO)
+
+ @property
+ def created_for_slack(self):
+ return bool(self.represents_alert_group.slack_message)
+
+ @classmethod
+ def make_call(cls, user, alert_group, notification_policy):
+ UserNotificationPolicyLogRecord = apps.get_model("base", "UserNotificationPolicyLogRecord")
+
+ organization = alert_group.channel.organization
+
+ log_record = None
+ if user.verified_phone_number:
+ # Create a PhoneCall object in db
+ phone_call = PhoneCall(
+ represents_alert_group=alert_group,
+ receiver=user,
+ notification_policy=notification_policy,
+ )
+
+ phone_calls_left = organization.phone_calls_left(user)
+
+ if phone_calls_left > 0:
+ phone_call.exceeded_limit = False
+ renderer = AlertGroupPhoneCallRenderer(alert_group)
+ message_body = renderer.render()
+ if phone_calls_left < 3:
+ message_body += " {} phone calls left. Contact your admin.".format(phone_calls_left)
+ try:
+ twilio_call = twilio_client.make_call(message_body, user.verified_phone_number)
+ except TwilioRestException:
+ log_record = UserNotificationPolicyLogRecord(
+ author=user,
+ type=UserNotificationPolicyLogRecord.TYPE_PERSONAL_NOTIFICATION_FAILED,
+ notification_policy=notification_policy,
+ alert_group=alert_group,
+ notification_error_code=UserNotificationPolicyLogRecord.ERROR_NOTIFICATION_NOT_ABLE_TO_CALL,
+ notification_step=notification_policy.step if notification_policy else None,
+ notification_channel=notification_policy.notify_by if notification_policy else None,
+ )
+ else:
+ if twilio_call.status and twilio_call.sid:
+ phone_call.status = TwilioCallStatuses.DETERMINANT.get(twilio_call.status, None)
+ phone_call.sid = twilio_call.sid
+ else:
+ log_record = UserNotificationPolicyLogRecord(
+ author=user,
+ type=UserNotificationPolicyLogRecord.TYPE_PERSONAL_NOTIFICATION_FAILED,
+ notification_policy=notification_policy,
+ alert_group=alert_group,
+ notification_error_code=UserNotificationPolicyLogRecord.ERROR_NOTIFICATION_PHONE_CALLS_LIMIT_EXCEEDED,
+ notification_step=notification_policy.step if notification_policy else None,
+ notification_channel=notification_policy.notify_by if notification_policy else None,
+ )
+ phone_call.exceeded_limit = True
+ phone_call.save()
+ else:
+ log_record = UserNotificationPolicyLogRecord(
+ author=user,
+ type=UserNotificationPolicyLogRecord.TYPE_PERSONAL_NOTIFICATION_FAILED,
+ notification_policy=notification_policy,
+ alert_group=alert_group,
+ notification_error_code=UserNotificationPolicyLogRecord.ERROR_NOTIFICATION_PHONE_NUMBER_IS_NOT_VERIFIED,
+ notification_step=notification_policy.step if notification_policy else None,
+ notification_channel=notification_policy.notify_by if notification_policy else None,
+ )
+
+ if log_record is not None:
+ log_record.save()
+ user_notification_action_triggered_signal.send(sender=PhoneCall.make_call, log_record=log_record)
+
+ @staticmethod
+ def get_error_code_by_twilio_status(status):
+ UserNotificationPolicyLogRecord = apps.get_model("base", "UserNotificationPolicyLogRecord")
+
+ TWILIO_ERRORS_TO_ERROR_CODES_MAP = {
+ TwilioCallStatuses.BUSY: UserNotificationPolicyLogRecord.ERROR_NOTIFICATION_PHONE_CALL_LINE_BUSY,
+ TwilioCallStatuses.FAILED: UserNotificationPolicyLogRecord.ERROR_NOTIFICATION_PHONE_CALL_FAILED,
+ TwilioCallStatuses.NO_ANSWER: UserNotificationPolicyLogRecord.ERROR_NOTIFICATION_PHONE_CALL_NO_ANSWER,
+ }
+
+ return TWILIO_ERRORS_TO_ERROR_CODES_MAP.get(status, None)
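+
+
+# Flow sketch (sid illustrative): Twilio reports call progress to
+# views.CallStatusCallback, which delegates to the manager above, e.g.
+#     PhoneCall.objects.update_status(call_sid="CA123", call_status="completed")
+# records a success log; "busy", "failed" and "no-answer" record a failure.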
diff --git a/engine/apps/twilioapp/models/sms_message.py b/engine/apps/twilioapp/models/sms_message.py
new file mode 100644
index 0000000000..09404e56f4
--- /dev/null
+++ b/engine/apps/twilioapp/models/sms_message.py
@@ -0,0 +1,185 @@
+import logging
+
+from django.apps import apps
+from django.db import models
+from twilio.base.exceptions import TwilioRestException
+
+from apps.alerts.incident_appearance.renderers.sms_renderer import AlertGroupSmsRenderer
+from apps.alerts.signals import user_notification_action_triggered_signal
+from apps.twilioapp.constants import TwilioMessageStatuses
+from apps.twilioapp.twilio_client import twilio_client
+
+logger = logging.getLogger(__name__)
+
+
+class SMSMessageManager(models.Manager):
+ def update_status(self, message_sid, message_status):
+        """Update the status of the SMSMessage with the given message_sid, if it exists.
+
+        Args:
+            message_sid (str): sid of the Twilio message
+            message_status (str): new status reported by Twilio
+        """
+ UserNotificationPolicyLogRecord = apps.get_model("base", "UserNotificationPolicyLogRecord")
+
+ if message_sid and message_status:
+ sms_message_qs = self.filter(sid=message_sid)
+
+ status = TwilioMessageStatuses.DETERMINANT.get(message_status)
+
+ if sms_message_qs.exists() and status:
+ sms_message_qs.update(status=status)
+
+ sms_message = sms_message_qs.first()
+
+ log_record = None
+
+ if status == TwilioMessageStatuses.DELIVERED:
+ log_record = UserNotificationPolicyLogRecord(
+ author=sms_message.receiver,
+ type=UserNotificationPolicyLogRecord.TYPE_PERSONAL_NOTIFICATION_SUCCESS,
+ notification_policy=sms_message.notification_policy,
+ alert_group=sms_message.represents_alert_group,
+ notification_step=sms_message.notification_policy.step
+ if sms_message.notification_policy
+ else None,
+ notification_channel=sms_message.notification_policy.notify_by
+ if sms_message.notification_policy
+ else None,
+ )
+ elif status in [TwilioMessageStatuses.UNDELIVERED, TwilioMessageStatuses.FAILED]:
+ log_record = UserNotificationPolicyLogRecord(
+ author=sms_message.receiver,
+ type=UserNotificationPolicyLogRecord.TYPE_PERSONAL_NOTIFICATION_FAILED,
+ notification_policy=sms_message.notification_policy,
+ alert_group=sms_message.represents_alert_group,
+ notification_error_code=sms_message.get_error_code_by_twilio_status(status),
+ notification_step=sms_message.notification_policy.step
+ if sms_message.notification_policy
+ else None,
+ notification_channel=sms_message.notification_policy.notify_by
+ if sms_message.notification_policy
+ else None,
+ )
+ if log_record is not None:
+ log_record.save()
+ user_notification_action_triggered_signal.send(
+ sender=SMSMessage.objects.update_status, log_record=log_record
+ )
+
+
+class SMSMessage(models.Model):
+ objects = SMSMessageManager()
+
+ exceeded_limit = models.BooleanField(null=True, default=None)
+ represents_alert = models.ForeignKey("alerts.Alert", on_delete=models.SET_NULL, null=True, default=None)
+ represents_alert_group = models.ForeignKey("alerts.AlertGroup", on_delete=models.SET_NULL, null=True, default=None)
+ notification_policy = models.ForeignKey(
+ "base.UserNotificationPolicy", on_delete=models.SET_NULL, null=True, default=None
+ )
+
+ receiver = models.ForeignKey("user_management.User", on_delete=models.CASCADE, null=True, default=None)
+
+ status = models.PositiveSmallIntegerField(
+ blank=True,
+ null=True,
+ choices=TwilioMessageStatuses.CHOICES,
+ )
+
+ # https://www.twilio.com/docs/sms/api/message-resource#message-properties
+ sid = models.CharField(
+ blank=True,
+ max_length=50,
+ )
+
+ created_at = models.DateTimeField(auto_now_add=True)
+
+ @property
+ def created_for_slack(self):
+ return bool(self.represents_alert_group.slack_message)
+
+ @classmethod
+ def send_sms(cls, user, alert_group, notification_policy):
+ UserNotificationPolicyLogRecord = apps.get_model("base", "UserNotificationPolicyLogRecord")
+
+ organization = alert_group.channel.organization
+
+ log_record = None
+ if user.verified_phone_number:
+ # Create an SMS object in db
+ sms_message = SMSMessage(
+ represents_alert_group=alert_group, receiver=user, notification_policy=notification_policy
+ )
+
+ sms_left = organization.sms_left(user)
+ if sms_left > 0:
+                # Mark it as within the SMS limit
+ sms_message.exceeded_limit = False
+ # Render alert message for sms
+ renderer = AlertGroupSmsRenderer(alert_group)
+ message_body = renderer.render()
+ # Notify if close to limit
+ if sms_left < 3:
+ message_body += " {} sms left. Contact your admin.".format(sms_left)
+ # Send an sms
+ try:
+ twilio_message = twilio_client.send_message(message_body, user.verified_phone_number)
+ except TwilioRestException:
+ log_record = UserNotificationPolicyLogRecord(
+ author=user,
+ type=UserNotificationPolicyLogRecord.TYPE_PERSONAL_NOTIFICATION_FAILED,
+ notification_policy=notification_policy,
+ alert_group=alert_group,
+ notification_error_code=UserNotificationPolicyLogRecord.ERROR_NOTIFICATION_NOT_ABLE_TO_SEND_SMS,
+ notification_step=notification_policy.step if notification_policy else None,
+ notification_channel=notification_policy.notify_by if notification_policy else None,
+ )
+ else:
+ if twilio_message.status and twilio_message.sid:
+ sms_message.status = TwilioMessageStatuses.DETERMINANT.get(twilio_message.status, None)
+ sms_message.sid = twilio_message.sid
+ else:
+ # If no more sms left, mark as exceeded limit
+ log_record = UserNotificationPolicyLogRecord(
+ author=user,
+ type=UserNotificationPolicyLogRecord.TYPE_PERSONAL_NOTIFICATION_FAILED,
+ notification_policy=notification_policy,
+ alert_group=alert_group,
+ notification_error_code=UserNotificationPolicyLogRecord.ERROR_NOTIFICATION_SMS_LIMIT_EXCEEDED,
+ notification_step=notification_policy.step if notification_policy else None,
+ notification_channel=notification_policy.notify_by if notification_policy else None,
+ )
+ sms_message.exceeded_limit = True
+
+ # Save object
+ sms_message.save()
+ else:
+ log_record = UserNotificationPolicyLogRecord(
+ author=user,
+ type=UserNotificationPolicyLogRecord.TYPE_PERSONAL_NOTIFICATION_FAILED,
+ notification_policy=notification_policy,
+ alert_group=alert_group,
+ notification_error_code=UserNotificationPolicyLogRecord.ERROR_NOTIFICATION_PHONE_NUMBER_IS_NOT_VERIFIED,
+ notification_step=notification_policy.step if notification_policy else None,
+ notification_channel=notification_policy.notify_by if notification_policy else None,
+ )
+
+ if log_record is not None:
+ log_record.save()
+ user_notification_action_triggered_signal.send(sender=SMSMessage.send_sms, log_record=log_record)
+
+ @staticmethod
+ def get_error_code_by_twilio_status(status):
+ UserNotificationPolicyLogRecord = apps.get_model("base", "UserNotificationPolicyLogRecord")
+
+ TWILIO_ERRORS_TO_ERROR_CODES_MAP = {
+ TwilioMessageStatuses.UNDELIVERED: UserNotificationPolicyLogRecord.ERROR_NOTIFICATION_SMS_DELIVERY_FAILED,
+ TwilioMessageStatuses.FAILED: UserNotificationPolicyLogRecord.ERROR_NOTIFICATION_SMS_DELIVERY_FAILED,
+ }
+
+ return TWILIO_ERRORS_TO_ERROR_CODES_MAP.get(status, None)
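+
+
+# Flow sketch (sid illustrative): delivery receipts arrive via
+# views.SMSStatusCallback, e.g.
+#     SMSMessage.objects.update_status(message_sid="SM123", message_status="delivered")
+# records a success log; "failed" and "undelivered" record a failure.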
diff --git a/engine/apps/twilioapp/models/twilio_log_record.py b/engine/apps/twilioapp/models/twilio_log_record.py
new file mode 100644
index 0000000000..f4530b5d30
--- /dev/null
+++ b/engine/apps/twilioapp/models/twilio_log_record.py
@@ -0,0 +1,26 @@
+from django.db import models
+
+from apps.twilioapp.constants import TwilioLogRecordStatus, TwilioLogRecordType
+
+
+class TwilioLogRecord(models.Model):
+
+ user = models.ForeignKey("user_management.User", on_delete=models.CASCADE)
+
+ phone_number = models.CharField(max_length=16)
+
+ type = models.PositiveSmallIntegerField(
+ choices=TwilioLogRecordType.CHOICES, default=TwilioLogRecordType.VERIFICATION_START
+ )
+
+ status = models.PositiveSmallIntegerField(
+ choices=TwilioLogRecordStatus.CHOICES, default=TwilioLogRecordStatus.PENDING
+ )
+
+ payload = models.TextField(null=True, default=None)
+
+ error_message = models.TextField(null=True, default=None)
+
+ succeed = models.BooleanField(default=False)
+
+ created_at = models.DateTimeField(auto_now_add=True)
diff --git a/engine/apps/twilioapp/phone_manager.py b/engine/apps/twilioapp/phone_manager.py
new file mode 100644
index 0000000000..af1dff1970
--- /dev/null
+++ b/engine/apps/twilioapp/phone_manager.py
@@ -0,0 +1,75 @@
+import logging
+
+from twilio.base.exceptions import TwilioRestException
+
+from apps.twilioapp.twilio_client import twilio_client
+
+logger = logging.getLogger(__name__)
+
+
+class PhoneManager:
+ def __init__(self, user):
+ self.user = user
+
+    def send_verification_code(self):
+        # No-op if the unverified number matches the already verified one
+        if self.user.unverified_phone_number != self.user.verified_phone_number:
+            res = twilio_client.verification_start_via_twilio(
+                user=self.user, phone_number=self.user.unverified_phone_number, via="sms"
+            )
+            if res and res.status != "denied":
+                return True
+            logger.error(f"Failed to send verification code to User {self.user.pk}:\n{res}")
+        return False
+
+ def verify_phone_number(self, code):
+ normalized_phone_number, _ = twilio_client.normalize_phone_number_via_twilio(self.user.unverified_phone_number)
+ if normalized_phone_number:
+ if normalized_phone_number == self.user.verified_phone_number:
+ verified = False
+ error = "This Phone Number has already been verified."
+ elif twilio_client.verification_check_via_twilio(
+ user=self.user,
+ phone_number=normalized_phone_number,
+ code=code,
+ ):
+ old_verified_phone_number = self.user.verified_phone_number
+ self.user.save_verified_phone_number(normalized_phone_number)
+ # send sms to the new number and to the old one
+ if old_verified_phone_number:
+ # notify about disconnect
+ self.notify_about_changed_verified_phone_number(old_verified_phone_number)
+ # notify about new connection
+ self.notify_about_changed_verified_phone_number(normalized_phone_number, True)
+
+ verified = True
+ error = None
+ else:
+ verified = False
+ error = "Verification code is not correct."
+ else:
+ verified = False
+ error = "Phone Number is incorrect."
+ return verified, error
+
+ def forget_phone_number(self):
+ if self.user.verified_phone_number or self.user.unverified_phone_number:
+ old_verified_phone_number = self.user.verified_phone_number
+ self.user.clear_phone_numbers()
+ if old_verified_phone_number:
+ self.notify_about_changed_verified_phone_number(old_verified_phone_number)
+ return True
+ return False
+
+ def notify_about_changed_verified_phone_number(self, phone_number, connected=False):
+ text = (
+ f"This phone number has been {'connected to' if connected else 'disconnected from'} Grafana OnCall team "
+ f'"{self.user.organization.org_title}"\nYour Grafana OnCall <3'
+ )
+ try:
+ twilio_client.send_message(text, phone_number)
+ except TwilioRestException as e:
+ logger.error(
+ f"Failed to notify user {self.user.pk} about phone number "
+ f"{'connection' if connected else 'disconnection'}:\n{e}"
+ )
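+
+
+# Usage sketch of the two-step verification flow (code value illustrative):
+#     manager = PhoneManager(user)
+#     manager.send_verification_code()                  # Twilio Verify sends an SMS
+#     verified, error = manager.verify_phone_number("123456")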
diff --git a/engine/apps/twilioapp/tests/__init__.py b/engine/apps/twilioapp/tests/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/engine/apps/twilioapp/tests/factories.py b/engine/apps/twilioapp/tests/factories.py
new file mode 100644
index 0000000000..e1b49940ca
--- /dev/null
+++ b/engine/apps/twilioapp/tests/factories.py
@@ -0,0 +1,13 @@
+import factory
+
+from apps.twilioapp.models import PhoneCall, SMSMessage
+
+
+class PhoneCallFactory(factory.DjangoModelFactory):
+ class Meta:
+ model = PhoneCall
+
+
+class SMSFactory(factory.DjangoModelFactory):
+ class Meta:
+ model = SMSMessage
diff --git a/engine/apps/twilioapp/tests/test_phone_calls.py b/engine/apps/twilioapp/tests/test_phone_calls.py
new file mode 100644
index 0000000000..22c64a3f8a
--- /dev/null
+++ b/engine/apps/twilioapp/tests/test_phone_calls.py
@@ -0,0 +1,270 @@
+from unittest import mock
+
+import pytest
+from bs4 import BeautifulSoup
+from django.urls import reverse
+from django.utils import timezone
+from django.utils.datastructures import MultiValueDict
+from django.utils.http import urlencode
+from rest_framework.test import APIClient
+
+from apps.base.models import UserNotificationPolicy
+from apps.twilioapp.constants import TwilioCallStatuses
+from apps.twilioapp.models import PhoneCall
+
+
+@pytest.fixture
+def phone_call_setup(
+ make_organization_and_user,
+ make_alert_receive_channel,
+ make_user_notification_policy,
+ make_alert_group,
+ make_alert,
+ make_phone_call,
+):
+ organization, user = make_organization_and_user()
+ alert_receive_channel = make_alert_receive_channel(organization)
+ alert_group = make_alert_group(alert_receive_channel)
+ make_alert(
+ alert_group,
+ raw_request_data={
+ "status": "firing",
+ "labels": {
+ "alertname": "TestAlert",
+ "region": "eu-1",
+ },
+ "annotations": {},
+ "startsAt": "2018-12-25T15:47:47.377363608Z",
+ "endsAt": "0001-01-01T00:00:00Z",
+ "generatorURL": "",
+ },
+ )
+
+ notification_policy = make_user_notification_policy(
+ user=user,
+ step=UserNotificationPolicy.Step.NOTIFY,
+ notify_by=UserNotificationPolicy.NotificationChannel.PHONE_CALL,
+ )
+
+ phone_call = make_phone_call(
+ receiver=user,
+ status=TwilioCallStatuses.QUEUED,
+ represents_alert_group=alert_group,
+ sid="SMa12312312a123a123123c6dd2f1aee77",
+ notification_policy=notification_policy,
+ )
+
+ return phone_call, alert_group
+
+
+@pytest.mark.django_db
+def test_phone_call_creation(phone_call_setup):
+ phone_call, _ = phone_call_setup
+ assert PhoneCall.objects.count() == 1
+ assert phone_call == PhoneCall.objects.first()
+
+
+@pytest.mark.django_db
+def test_forbidden_requests(phone_call_setup):
+    """Check that the Twilio status-callback URLs reject unauthorized requests"""
+ phone_call, _ = phone_call_setup
+
+ # empty data case
+ data = {}
+
+ client = APIClient()
+ response = client.post(
+ reverse("twilioapp:call_status_events"),
+ data=urlencode(MultiValueDict(data), doseq=True),
+ content_type="application/x-www-form-urlencoded",
+ )
+
+ assert response.status_code == 403
+ assert response.data["detail"] == "You do not have permission to perform this action."
+
+ # wrong AccountSid data
+ data = {"CallSid": phone_call.sid, "CallStatus": "completed", "AccountSid": "TopSecretAccountSid"}
+
+ client = APIClient()
+ response = client.post(
+ path=reverse("twilioapp:call_status_events"),
+ data=urlencode(MultiValueDict(data), doseq=True),
+ content_type="application/x-www-form-urlencoded",
+ )
+
+ assert response.status_code == 403
+ assert response.data["detail"] == "You do not have permission to perform this action."
+
+ # absent CallSid data
+ data = {"CallStatus": "completed", "AccountSid": "TopSecretAccountSid"}
+
+ client = APIClient()
+ response = client.post(
+ path=reverse("twilioapp:call_status_events"),
+ data=urlencode(MultiValueDict(data), doseq=True),
+ content_type="application/x-www-form-urlencoded",
+ )
+
+ assert response.status_code == 403
+ assert response.data["detail"] == "You do not have permission to perform this action."
+
+
+@mock.patch("apps.twilioapp.views.AllowOnlyTwilio.has_permission")
+@mock.patch("apps.slack.slack_client.SlackClientWithErrorHandling.api_call")
+@pytest.mark.django_db
+# mock.patch decorators are applied bottom-up, so the innermost patch is injected first
+def test_update_status(mock_slack_api_call, mock_has_permission, phone_call_setup):
+ """The test for PhoneCall status update via api"""
+ phone_call, _ = phone_call_setup
+
+ mock_has_permission.return_value = True
+
+ for status in ["in-progress", "completed", "busy", "failed", "no-answer", "canceled"]:
+ mock_slack_api_call.return_value = {"ok": True, "ts": timezone.now().timestamp()}
+
+ data = {
+ "CallSid": phone_call.sid,
+ "CallStatus": status,
+            "AccountSid": "Any value works because has_permission is mocked",
+ }
+
+ client = APIClient()
+ response = client.post(
+ path=reverse("twilioapp:call_status_events"),
+ data=urlencode(MultiValueDict(data), doseq=True),
+ content_type="application/x-www-form-urlencoded",
+ )
+
+ assert response.status_code == 204
+ assert response.data == ""
+
+ phone_call.refresh_from_db()
+ assert phone_call.status == TwilioCallStatuses.DETERMINANT[status]
+
+
+@mock.patch("apps.twilioapp.views.AllowOnlyTwilio.has_permission")
+@mock.patch("apps.twilioapp.utils.get_gather_url")
+@pytest.mark.django_db
+def test_acknowledge_by_phone(mock_get_gather_url, mock_has_permission, phone_call_setup):
+ phone_call, alert_group = phone_call_setup
+
+ mock_has_permission.return_value = True
+ mock_get_gather_url.return_value = reverse("twilioapp:gather")
+
+ data = {
+ "CallSid": phone_call.sid,
+ "Digits": "1",
+        "AccountSid": "Any value works because has_permission is mocked",
+ }
+
+ assert alert_group.acknowledged is False
+
+ client = APIClient()
+ response = client.post(
+ reverse("twilioapp:gather"),
+ data=urlencode(MultiValueDict(data), doseq=True),
+ content_type="application/x-www-form-urlencoded",
+ )
+
+ content = response.content.decode("utf-8")
+
+ assert response.status_code == 200
+ assert "You have pressed digit 1" in content
+
+ alert_group.refresh_from_db()
+ assert alert_group.acknowledged is True
+
+
+@mock.patch("apps.twilioapp.views.AllowOnlyTwilio.has_permission")
+@mock.patch("apps.twilioapp.utils.get_gather_url")
+@pytest.mark.django_db
+def test_resolve_by_phone(mock_get_gather_url, mock_has_permission, phone_call_setup):
+ phone_call, alert_group = phone_call_setup
+
+ mock_has_permission.return_value = True
+ mock_get_gather_url.return_value = reverse("twilioapp:gather")
+
+ data = {
+ "CallSid": phone_call.sid,
+ "Digits": "2",
+        "AccountSid": "Any value works because has_permission is mocked",
+ }
+
+ assert alert_group.resolved is False
+
+ client = APIClient()
+ response = client.post(
+ reverse("twilioapp:gather"),
+ data=urlencode(MultiValueDict(data), doseq=True),
+ content_type="application/x-www-form-urlencoded",
+ )
+
+ content = response.content.decode("utf-8")
+ content = BeautifulSoup(content, features="html.parser").findAll(text=True)
+
+ assert response.status_code == 200
+ assert "You have pressed digit 2" in content
+
+ alert_group.refresh_from_db()
+ assert alert_group.resolved is True
+
+
+@mock.patch("apps.twilioapp.views.AllowOnlyTwilio.has_permission")
+@mock.patch("apps.twilioapp.utils.get_gather_url")
+@pytest.mark.django_db
+def test_silence_by_phone(mock_get_gather_url, mock_has_permission, phone_call_setup):
+ phone_call, alert_group = phone_call_setup
+
+ mock_has_permission.return_value = True
+ mock_get_gather_url.return_value = reverse("twilioapp:gather")
+
+ data = {
+ "CallSid": phone_call.sid,
+ "Digits": "3",
+        "AccountSid": "Any value works because has_permission is mocked",
+ }
+
+ assert alert_group.silenced_until is None
+
+ client = APIClient()
+ response = client.post(
+ reverse("twilioapp:gather"),
+ data=urlencode(MultiValueDict(data), doseq=True),
+ content_type="application/x-www-form-urlencoded",
+ )
+
+ content = response.content.decode("utf-8")
+
+ assert response.status_code == 200
+ assert "You have pressed digit 3" in content
+
+ alert_group.refresh_from_db()
+ assert alert_group.silenced_until is not None
+
+
+@mock.patch("apps.twilioapp.views.AllowOnlyTwilio.has_permission")
+@mock.patch("apps.twilioapp.utils.get_gather_url")
+@pytest.mark.django_db
+def test_wrong_pressed_digit(mock_get_gather_url, mock_has_permission, phone_call_setup):
+ phone_call, _ = phone_call_setup
+
+ mock_has_permission.return_value = True
+ mock_get_gather_url.return_value = reverse("twilioapp:gather")
+
+ data = {
+ "CallSid": phone_call.sid,
+ "Digits": "0",
+        "AccountSid": "Any value works because has_permission is mocked",
+ }
+
+ client = APIClient()
+ response = client.post(
+ path=reverse("twilioapp:gather"),
+ data=urlencode(MultiValueDict(data), doseq=True),
+ content_type="application/x-www-form-urlencoded",
+ )
+
+ content = response.content.decode("utf-8")
+ content = BeautifulSoup(content, features="html.parser").findAll(text=True)
+
+ assert response.status_code == 200
+ assert "Wrong digit" in content
diff --git a/engine/apps/twilioapp/tests/test_sms_message.py b/engine/apps/twilioapp/tests/test_sms_message.py
new file mode 100644
index 0000000000..86ab2390a0
--- /dev/null
+++ b/engine/apps/twilioapp/tests/test_sms_message.py
@@ -0,0 +1,142 @@
+from unittest import mock
+
+import pytest
+from django.urls import reverse
+from django.utils import timezone
+from django.utils.datastructures import MultiValueDict
+from django.utils.http import urlencode
+from rest_framework.test import APIClient
+
+from apps.base.models import UserNotificationPolicy
+from apps.twilioapp.constants import TwilioMessageStatuses
+from apps.twilioapp.models import SMSMessage
+
+
+@pytest.fixture
+def sms_message_setup(
+ make_organization_and_user,
+ make_alert_receive_channel,
+ make_user_notification_policy,
+ make_alert_group,
+ make_alert,
+ make_phone_call,
+):
+ organization, user = make_organization_and_user()
+ alert_receive_channel = make_alert_receive_channel(organization)
+ alert_group = make_alert_group(alert_receive_channel)
+ make_alert(
+ alert_group,
+ raw_request_data={
+ "status": "firing",
+ "labels": {
+ "alertname": "TestAlert",
+ "region": "eu-1",
+ },
+ "annotations": {},
+ "startsAt": "2018-12-25T15:47:47.377363608Z",
+ "endsAt": "0001-01-01T00:00:00Z",
+ "generatorURL": "",
+ },
+ )
+
+ notification_policy = make_user_notification_policy(
+ user=user,
+ step=UserNotificationPolicy.Step.NOTIFY,
+ notify_by=UserNotificationPolicy.NotificationChannel.SMS,
+ )
+
+ sms_message = SMSMessage.objects.create(
+ represents_alert_group=alert_group,
+ receiver=user,
+ sid="SMa12312312a123a123123c6dd2f1aee77",
+ status=TwilioMessageStatuses.QUEUED,
+ notification_policy=notification_policy,
+ )
+
+ return sms_message, alert_group
+
+
+@pytest.mark.django_db
+def test_sms_message_creation(sms_message_setup):
+ sms_message, _ = sms_message_setup
+
+ assert SMSMessage.objects.count() == 1
+ assert sms_message == SMSMessage.objects.first()
+
+
+@pytest.mark.django_db
+def test_forbidden_requests(sms_message_setup):
+    """Check that the Twilio status-callback URLs reject unauthorized requests"""
+ sms_message, _ = sms_message_setup
+
+ # empty data case
+ data = {}
+
+ client = APIClient()
+ response = client.post(
+ path=reverse("twilioapp:sms_status_events"),
+ data=urlencode(MultiValueDict(data), doseq=True),
+ content_type="application/x-www-form-urlencoded",
+ )
+
+ assert response.status_code == 403
+ assert response.data["detail"] == "You do not have permission to perform this action."
+
+ # wrong AccountSid data
+ data = {"MessageSid": sms_message.sid, "MessageStatus": "delivered", "AccountSid": "TopSecretAccountSid"}
+
+ response = client.post(
+ path=reverse("twilioapp:sms_status_events"),
+ data=urlencode(MultiValueDict(data), doseq=True),
+ content_type="application/x-www-form-urlencoded",
+ )
+
+ assert response.status_code == 403
+ assert response.data["detail"] == "You do not have permission to perform this action."
+
+ # absent MessageSid data
+ data = {"MessageStatus": "delivered", "AccountSid": "TopSecretAccountSid"}
+
+ response = client.post(
+ path=reverse("twilioapp:sms_status_events"),
+ data=urlencode(MultiValueDict(data), doseq=True),
+ content_type="application/x-www-form-urlencoded",
+ )
+
+ assert response.status_code == 403
+ assert response.data["detail"] == "You do not have permission to perform this action."
+
+
+@mock.patch("apps.twilioapp.views.AllowOnlyTwilio.has_permission")
+@mock.patch("apps.slack.slack_client.SlackClientWithErrorHandling.api_call")
+@pytest.mark.django_db
+def test_update_status(mock_slack_api_call, mock_has_permission, sms_message_setup):
+ """The test for SMSMessage status update via api"""
+ sms_message, _ = sms_message_setup
+
+ # https://stackoverflow.com/questions/50157543/unittest-django-mock-external-api-what-is-proper-way
+    # Allow requests through the Twilio permission check; api_call responses are set in the loop below
+ mock_has_permission.return_value = True
+
+ for status in ["delivered", "failed", "undelivered"]:
+ mock_slack_api_call.return_value = {"ok": True, "ts": timezone.now().timestamp()}
+
+ data = {
+ "MessageSid": sms_message.sid,
+ "MessageStatus": status,
+            "AccountSid": "Any value works because has_permission is mocked",
+ }
+ # https://stackoverflow.com/questions/11571474/djangos-test-client-with-multiple-values-for-data-keys
+
+ client = APIClient()
+ response = client.post(
+ path=reverse("twilioapp:sms_status_events"),
+ data=urlencode(MultiValueDict(data), doseq=True),
+ content_type="application/x-www-form-urlencoded",
+ )
+
+ assert response.status_code == 204
+ assert response.data == ""
+
+ sms_message.refresh_from_db()
+ assert sms_message.status == TwilioMessageStatuses.DETERMINANT[status]
diff --git a/engine/apps/twilioapp/twilio_client.py b/engine/apps/twilioapp/twilio_client.py
new file mode 100644
index 0000000000..9c2d6cc3b5
--- /dev/null
+++ b/engine/apps/twilioapp/twilio_client.py
@@ -0,0 +1,179 @@
+import logging
+import urllib.parse
+
+from django.apps import apps
+from django.conf import settings
+from django.urls import reverse
+from twilio.base.exceptions import TwilioRestException
+from twilio.rest import Client
+
+from apps.base.utils import live_settings
+from apps.twilioapp.constants import TEST_CALL_TEXT, TwilioLogRecordStatus, TwilioLogRecordType
+from apps.twilioapp.utils import get_calling_code, get_gather_message, get_gather_url, parse_phone_number
+
+logger = logging.getLogger(__name__)
+
+
+class TwilioClient:
+ @property
+ def twilio_api_client(self):
+ return Client(live_settings.TWILIO_ACCOUNT_SID, live_settings.TWILIO_AUTH_TOKEN)
+
+ @property
+ def twilio_number(self):
+ return live_settings.TWILIO_NUMBER
+
+ def send_message(self, body, to):
+ status_callback = settings.BASE_URL + reverse("twilioapp:sms_status_events")
+ return self.twilio_api_client.messages.create(
+ body=body, to=to, from_=self.twilio_number, status_callback=status_callback
+ )
+
+ # Use responsibly
+ def parse_number(self, number):
+ try:
+ response = self.twilio_api_client.lookups.phone_numbers(number).fetch()
+ return True, response.phone_number, get_calling_code(response.country_code)
+        except TwilioRestException as e:
+            if e.code == 20404:
+                logger.warning("Handled exception from twilio: " + str(e))
+                return False, None, None
+            if e.code == 20003:
+                raise
+        except KeyError as e:
+            logger.warning("Handled exception from twilio: " + str(e))
+            return False, None, None
+
+ def verification_start_via_twilio(self, user, phone_number, via):
+ # https://www.twilio.com/docs/verify/api/verification?code-sample=code-start-a-verification-with-sms&code-language=Python&code-sdk-version=6.x
+ verification = None
+ try:
+ verification = self.twilio_api_client.verify.services(
+ live_settings.TWILIO_VERIFY_SERVICE_SID
+ ).verifications.create(to=phone_number, channel=via)
+ except TwilioRestException as e:
+ logger.error(f"Twilio verification start error: {e} for User: {user.pk}")
+
+ self.create_log_record(
+ user=user,
+ phone_number=(phone_number or ""),
+ type=TwilioLogRecordType.VERIFICATION_START,
+ status=TwilioLogRecordStatus.ERROR,
+ succeed=False,
+ error_message=str(e),
+ )
+ else:
+ verification_status = verification.status
+ logger.info(f"Verification status: {verification_status}")
+
+ self.create_log_record(
+ user=user,
+ phone_number=phone_number,
+ type=TwilioLogRecordType.VERIFICATION_START,
+ payload=str(verification._properties),
+ status=TwilioLogRecordStatus.DETERMINANT[verification_status],
+ succeed=(verification_status != "denied"),
+ )
+
+ return verification
+
+ def verification_check_via_twilio(self, user, phone_number, code):
+ # https://www.twilio.com/docs/verify/api/verification-check?code-sample=code-check-a-verification-with-a-phone-number&code-language=Python&code-sdk-version=6.x
+ succeed = False
+ try:
+ verification_check = self.twilio_api_client.verify.services(
+ live_settings.TWILIO_VERIFY_SERVICE_SID
+ ).verification_checks.create(to=phone_number, code=code)
+ except TwilioRestException as e:
+ logger.error(f"Twilio verification check error: {e} for User: {user.pk}")
+ self.create_log_record(
+ user=user,
+ phone_number=(phone_number or ""),
+ type=TwilioLogRecordType.VERIFICATION_CHECK,
+ status=TwilioLogRecordStatus.ERROR,
+ succeed=succeed,
+ error_message=str(e),
+ )
+ else:
+ verification_check_status = verification_check.status
+ logger.info(f"Verification check status: {verification_check_status}")
+ succeed = verification_check_status == "approved"
+
+ self.create_log_record(
+ user=user,
+ phone_number=phone_number,
+ type=TwilioLogRecordType.VERIFICATION_CHECK,
+ payload=str(verification_check._properties),
+ status=TwilioLogRecordStatus.DETERMINANT[verification_check_status],
+ succeed=succeed,
+ )
+
+ return succeed
+
+ def make_test_call(self, to):
+ message = TEST_CALL_TEXT.format(
+ channel_name="Test call",
+ alert_group_name="Test notification",
+ alerts_count=2,
+ )
+ self.make_call(message=message, to=to)
+
+ def make_call(self, message, to):
+ try:
+ start_message = message.replace('"', "")
+
+ twiml_query = urllib.parse.quote(
+ (
+                    f"<Response>"
+                    f"<Say>{start_message}</Say>"
+                    f'<Gather numDigits="1" action="{get_gather_url()}" method="POST">'
+                    f"<Say>{get_gather_message()}</Say>"
+                    f"</Gather>"
+                    f"</Response>"
+ ),
+ safe="",
+ )
+
+ url = "http://twimlets.com/echo?Twiml=" + twiml_query
+ status_callback = settings.BASE_URL + reverse("twilioapp:call_status_events")
+
+ status_callback_events = ["initiated", "ringing", "answered", "completed"]
+
+ return self.twilio_api_client.calls.create(
+ url=url,
+ to=to,
+ from_=self.twilio_number,
+ method="GET",
+ status_callback=status_callback,
+ status_callback_event=status_callback_events,
+ status_callback_method="POST",
+ )
+ except TwilioRestException as e:
+ raise e
+
+ def create_log_record(self, **kwargs):
+ TwilioLogRecord = apps.get_model("twilioapp", "TwilioLogRecord")
+ TwilioLogRecord.objects.create(**kwargs)
+
+ def normalize_phone_number_via_twilio(self, phone_number):
+ phone_number = parse_phone_number(phone_number)
+
+ # Verify and parse phone number with Twilio API
+ normalized_phone_number = None
+ country_code = None
+ if phone_number != "" and phone_number != "+":
+ try:
+ ok, normalized_phone_number, country_code = self.parse_number(phone_number)
+ if normalized_phone_number == "":
+ normalized_phone_number = None
+ country_code = None
+ if not ok:
+ normalized_phone_number = None
+ country_code = None
+ except TypeError:
+ return None, None
+
+ return normalized_phone_number, country_code
+
+
+twilio_client = TwilioClient()
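+
+# Usage sketch (number illustrative; result depends on the Twilio Lookup API):
+#     twilio_client.normalize_phone_number_via_twilio("+1 555 010 0000")
+#     # -> ("+15550100000", 1) on success, (None, None) otherwise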
diff --git a/engine/apps/twilioapp/urls.py b/engine/apps/twilioapp/urls.py
new file mode 100644
index 0000000000..7d010b436d
--- /dev/null
+++ b/engine/apps/twilioapp/urls.py
@@ -0,0 +1,12 @@
+from django.urls import path
+
+from .views import CallStatusCallback, GatherView, HealthCheckView, SMSStatusCallback
+
+app_name = "twilioapp"
+
+urlpatterns = [
+ path("healthz", HealthCheckView.as_view()),
+ path("gather/", GatherView.as_view(), name="gather"),
+ path("sms_status_events/", SMSStatusCallback.as_view(), name="sms_status_events"),
+ path("call_status_events/", CallStatusCallback.as_view(), name="call_status_events"),
+]
diff --git a/engine/apps/twilioapp/utils.py b/engine/apps/twilioapp/utils.py
new file mode 100644
index 0000000000..10986ddac8
--- /dev/null
+++ b/engine/apps/twilioapp/utils.py
@@ -0,0 +1,67 @@
+import logging
+import re
+from string import digits
+
+from django.apps import apps
+from django.conf import settings
+from django.urls import reverse
+from phonenumbers import COUNTRY_CODE_TO_REGION_CODE
+from twilio.twiml.voice_response import Gather, VoiceResponse
+
+logger = logging.getLogger(__name__)
+
+
+def get_calling_code(iso):
+ for code, isos in COUNTRY_CODE_TO_REGION_CODE.items():
+ if iso.upper() in isos:
+ return code
+ return None
+
+
+def get_gather_url():
+ gather_url = settings.BASE_URL + reverse("twilioapp:gather")
+ return gather_url
+
+
+def get_gather_message():
+    return "Press 1 to acknowledge, 2 to resolve, 3 to silence for 30 minutes"
+
+
+def process_call_data(call_sid, digit):
+    """Build the TwiML response for a digit pressed during a call.
+
+    Args:
+        call_sid (str): sid of the Twilio call
+        digit (str): digit pressed by the user
+
+    Returns:
+        response (VoiceResponse)
+    """
+
+ response = VoiceResponse()
+
+ if digit in ["1", "2", "3"]:
+ # Success case
+ response.say(f"You have pressed digit {digit}")
+
+ PhoneCall = apps.get_model("twilioapp", "PhoneCall")
+ PhoneCall.objects.get_and_process_digit(call_sid=call_sid, digit=digit)
+
+ else:
+        # Wrong digit: prompt again
+ gather = Gather(action=get_gather_url(), method="POST", num_digits=1)
+
+ response.say("Wrong digit")
+ gather.say(get_gather_message())
+
+ response.append(gather)
+
+ return response
+
+
+def check_phone_number_is_valid(phone_number):
+ return re.match(r"^\+\d{8,15}$", phone_number) is not None
+
+
+def parse_phone_number(raw_phone_number):
+ return "+" + "".join(c for c in raw_phone_number if c in digits)
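+
+
+# Usage sketch:
+#     parse_phone_number("+1 (555) 010-0000")      # -> "+15550100000"
+#     check_phone_number_is_valid("+15550100000")  # -> True (8-15 digits after "+")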
diff --git a/engine/apps/twilioapp/views.py b/engine/apps/twilioapp/views.py
new file mode 100644
index 0000000000..14d3756b23
--- /dev/null
+++ b/engine/apps/twilioapp/views.py
@@ -0,0 +1,74 @@
+import logging
+
+from django.apps import apps
+from django.http import HttpResponse
+from rest_framework import status
+from rest_framework.permissions import BasePermission
+from rest_framework.response import Response
+from rest_framework.views import APIView
+from twilio.request_validator import RequestValidator
+
+from apps.base.utils import live_settings
+from apps.twilioapp.utils import process_call_data
+
+logger = logging.getLogger(__name__)
+
+
+class AllowOnlyTwilio(BasePermission):
+ def has_permission(self, request, view):
+ # https://www.twilio.com/docs/usage/tutorials/how-to-secure-your-django-project-by-validating-incoming-twilio-requests
+ # https://www.django-rest-framework.org/api-guide/permissions/
+ validator = RequestValidator(live_settings.TWILIO_AUTH_TOKEN)
+ request_valid = validator.validate(
+ request.build_absolute_uri(), request.POST, request.META.get("HTTP_X_TWILIO_SIGNATURE", "")
+ )
+ return request_valid
+
+
+class HealthCheckView(APIView):
+ def get(self, request):
+ return Response("OK")
+
+
+class GatherView(APIView):
+ permission_classes = [AllowOnlyTwilio]
+
+ def post(self, request):
+ digit = request.POST.get("Digits")
+ call_sid = request.POST.get("CallSid")
+
+        logger.info(f"CallSid: {call_sid}, pressed digit: {digit}")
+
+ response = process_call_data(call_sid=call_sid, digit=digit)
+
+ return HttpResponse(str(response), content_type="application/xml; charset=utf-8")
+
+
+# Receive SMS Status Update from Twilio
+class SMSStatusCallback(APIView):
+ permission_classes = [AllowOnlyTwilio]
+
+ def post(self, request):
+ message_sid = request.POST.get("MessageSid")
+ message_status = request.POST.get("MessageStatus")
+        logger.info(f"SID: {message_sid}, Status: {message_status}")
+
+ SMSMessage = apps.get_model("twilioapp", "SMSMessage")
+ SMSMessage.objects.update_status(message_sid=message_sid, message_status=message_status)
+ return Response(data="", status=status.HTTP_204_NO_CONTENT)
+
+
+# Receive Call Status Update from Twilio
+class CallStatusCallback(APIView):
+ permission_classes = [AllowOnlyTwilio]
+
+ def post(self, request):
+ call_sid = request.POST.get("CallSid")
+ call_status = request.POST.get("CallStatus")
+
+        logger.info(f"SID: {call_sid}, Status: {call_status}")
+
+ PhoneCall = apps.get_model("twilioapp", "PhoneCall")
+ PhoneCall.objects.update_status(call_sid=call_sid, call_status=call_status)
+
+ return Response(data="", status=status.HTTP_204_NO_CONTENT)
diff --git a/engine/apps/user_management/__init__.py b/engine/apps/user_management/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/engine/apps/user_management/admin.py b/engine/apps/user_management/admin.py
new file mode 100644
index 0000000000..a5b79cc391
--- /dev/null
+++ b/engine/apps/user_management/admin.py
@@ -0,0 +1,20 @@
+from django.contrib import admin
+
+from common.admin import CustomModelAdmin
+
+from .models import Organization, Team, User
+
+
+@admin.register(User)
+class UserAdmin(CustomModelAdmin):
+ list_display = ("id", "public_primary_key", "organization", "username", "email")
+
+
+@admin.register(Team)
+class TeamAdmin(CustomModelAdmin):
+ list_display = ("id", "public_primary_key", "organization", "name")
+
+
+@admin.register(Organization)
+class OrganizationAdmin(CustomModelAdmin):
+ list_display = ("id", "public_primary_key", "org_title", "org_slug", "org_id", "stack_id")
diff --git a/engine/apps/user_management/migrations/0001_squashed_initial.py b/engine/apps/user_management/migrations/0001_squashed_initial.py
new file mode 100644
index 0000000000..e88a5acb7b
--- /dev/null
+++ b/engine/apps/user_management/migrations/0001_squashed_initial.py
@@ -0,0 +1,107 @@
+# Generated by Django 3.2.5 on 2022-05-31 14:46
+
+import apps.user_management.models.organization
+import apps.user_management.models.team
+import apps.user_management.models.user
+import datetime
+import django.core.validators
+from django.db import migrations, models
+import django.db.models.deletion
+import mirage.fields
+
+
+class Migration(migrations.Migration):
+
+ initial = True
+
+ dependencies = [
+ ('alerts', '0001_squashed_initial'),
+ ('slack', '0001_squashed_initial'),
+ ]
+
+ operations = [
+ migrations.CreateModel(
+ name='Organization',
+ fields=[
+ ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
+ ('maintenance_duration', models.DurationField(choices=[(datetime.timedelta(seconds=3600), '1 hour'), (datetime.timedelta(seconds=10800), '3 hours'), (datetime.timedelta(seconds=21600), '6 hours'), (datetime.timedelta(seconds=43200), '12 hours'), (datetime.timedelta(days=1), '24 hours')], default=None, null=True)),
+ ('maintenance_mode', models.IntegerField(choices=[(0, 'Debug'), (1, 'Maintenance')], default=None, null=True)),
+ ('maintenance_uuid', models.CharField(default=None, max_length=250, null=True, unique=True)),
+ ('maintenance_started_at', models.DateTimeField(default=None, null=True)),
+ ('public_primary_key', models.CharField(default=apps.user_management.models.organization.generate_public_primary_key_for_organization, max_length=20, unique=True, validators=[django.core.validators.MinLengthValidator(13)])),
+ ('stack_id', models.PositiveIntegerField()),
+ ('org_id', models.PositiveIntegerField()),
+ ('stack_slug', models.CharField(max_length=300)),
+ ('org_slug', models.CharField(max_length=300)),
+ ('org_title', models.CharField(max_length=300)),
+ ('grafana_url', models.URLField()),
+ ('api_token', mirage.fields.EncryptedCharField(max_length=300)),
+ ('api_token_status', models.IntegerField(choices=[(0, 'API Token Status Pending'), (1, 'API Token Status Ok'), (2, 'API Token Status Failed')], default=0)),
+ ('gcom_token', mirage.fields.EncryptedCharField(default=None, max_length=300, null=True)),
+ ('gcom_token_org_last_time_synced', models.DateTimeField(default=None, null=True)),
+ ('last_time_synced', models.DateTimeField(default=None, null=True)),
+ ('is_resolution_note_required', models.BooleanField(default=False)),
+ ('archive_alerts_from', models.DateField(default='1970-01-01')),
+ ('general_log_channel_id', models.CharField(default=None, max_length=100, null=True)),
+ ('acknowledge_remind_timeout', models.IntegerField(choices=[(0, 'Never remind about ack-ed incidents'), (1, 'Remind every 1 hour'), (2, 'Remind every 3 hours'), (3, 'Remind every 5 hours'), (4, 'Remind every 10 hours')], default=0)),
+ ('unacknowledge_timeout', models.IntegerField(choices=[(0, 'and never unack'), (1, 'and unack in 5 min if no response'), (2, 'and unack in 15 min if no response'), (3, 'and unack in 30 min if no response'), (4, 'and unack in 45 min if no response')], default=0)),
+ ('datetime', models.DateTimeField(auto_now_add=True)),
+ ('pricing_version', models.PositiveIntegerField(choices=[(0, 'Free public beta')], default=0)),
+ ('is_amixr_migration_started', models.BooleanField(default=False)),
+ ],
+ ),
+ migrations.CreateModel(
+ name='Team',
+ fields=[
+ ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
+ ('public_primary_key', models.CharField(default=apps.user_management.models.team.generate_public_primary_key_for_team, max_length=20, unique=True, validators=[django.core.validators.MinLengthValidator(13)])),
+ ('team_id', models.PositiveIntegerField()),
+ ('name', models.CharField(max_length=300)),
+ ('email', models.CharField(blank=True, default=None, max_length=300, null=True)),
+ ('avatar_url', models.URLField()),
+ ('organization', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='teams', to='user_management.organization')),
+ ],
+ ),
+ migrations.CreateModel(
+ name='User',
+ fields=[
+ ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
+ ('public_primary_key', models.CharField(default=apps.user_management.models.user.generate_public_primary_key_for_user, max_length=20, unique=True, validators=[django.core.validators.MinLengthValidator(13)])),
+ ('user_id', models.PositiveIntegerField()),
+ ('email', models.EmailField(max_length=254)),
+ ('name', models.CharField(max_length=300)),
+ ('username', models.CharField(max_length=300)),
+ ('role', models.PositiveSmallIntegerField(choices=[(0, 'ADMIN'), (1, 'EDITOR'), (2, 'VIEWER')])),
+ ('avatar_url', models.URLField()),
+ ('unverified_phone_number', models.CharField(default=None, max_length=20, null=True)),
+ ('_verified_phone_number', models.CharField(default=None, max_length=20, null=True)),
+ ('is_active', models.BooleanField(default=True, null=True)),
+ ('current_team', models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.SET_NULL, to='user_management.team')),
+ ('notification', models.ManyToManyField(through='alerts.UserHasNotification', to='alerts.AlertGroup')),
+ ('organization', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='users', to='user_management.organization')),
+ ('slack_user_identity', models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='users', to='slack.slackuseridentity')),
+ ],
+ options={
+ 'unique_together': {('user_id', 'organization', 'is_active')},
+ },
+ ),
+ migrations.AddField(
+ model_name='team',
+ name='users',
+ field=models.ManyToManyField(related_name='teams', to='user_management.User'),
+ ),
+ migrations.AddField(
+ model_name='organization',
+ name='maintenance_author',
+ field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='organization_maintenances_created', to='user_management.user'),
+ ),
+ migrations.AddField(
+ model_name='organization',
+ name='slack_team_identity',
+ field=models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='organizations', to='slack.slackteamidentity'),
+ ),
+ migrations.AlterUniqueTogether(
+ name='organization',
+ unique_together={('stack_id', 'org_id')},
+ ),
+ ]
diff --git a/engine/apps/user_management/migrations/0002_squashed_create_demo_token_instances.py b/engine/apps/user_management/migrations/0002_squashed_create_demo_token_instances.py
new file mode 100644
index 0000000000..8b8f932cc6
--- /dev/null
+++ b/engine/apps/user_management/migrations/0002_squashed_create_demo_token_instances.py
@@ -0,0 +1,51 @@
+# Generated by Django 3.2.5 on 2021-08-04 10:46
+
+import sys
+from django.db import migrations
+from apps.public_api import constants as public_api_constants
+from common.constants.role import Role
+
+
+def create_demo_token_instances(apps, schema_editor):
+ if not (len(sys.argv) > 1 and sys.argv[1] == 'test'):
+ SlackUserIdentity = apps.get_model('slack', 'SlackUserIdentity')
+ SlackTeamIdentity = apps.get_model('slack', 'SlackTeamIdentity')
+ User = apps.get_model('user_management', 'User')
+ Organization = apps.get_model('user_management', 'Organization')
+
+ slack_team_identity = SlackTeamIdentity.objects.get(slack_id=public_api_constants.DEMO_SLACK_TEAM_ID)
+ slack_user_identity = SlackUserIdentity.objects.get(
+ slack_id=public_api_constants.DEMO_SLACK_USER_ID,
+ slack_team_identity=slack_team_identity,
+ )
+
+ organization, _ = Organization.objects.get_or_create(
+ public_primary_key=public_api_constants.DEMO_ORGANIZATION_ID,
+ defaults=dict(
+ slack_team_identity=slack_team_identity,
+ org_id=0, stack_id=0,
+ )
+ )
+ User.objects.get_or_create(
+ public_primary_key=public_api_constants.DEMO_USER_ID,
+ defaults=dict(
+ username=public_api_constants.DEMO_USER_USERNAME,
+ email=public_api_constants.DEMO_USER_EMAIL,
+ organization=organization,
+ role=Role.ADMIN,
+ slack_user_identity=slack_user_identity,
+ user_id=0,
+ )
+ )
+
+
+class Migration(migrations.Migration):
+
+ dependencies = [
+ ('user_management', '0001_squashed_initial'),
+ ('slack', '0003_squashed_create_demo_token_instances'),
+ ]
+
+ operations = [
+ migrations.RunPython(create_demo_token_instances, migrations.RunPython.noop)
+ ]
diff --git a/engine/apps/user_management/migrations/__init__.py b/engine/apps/user_management/migrations/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/engine/apps/user_management/models/__init__.py b/engine/apps/user_management/models/__init__.py
new file mode 100644
index 0000000000..95ed32ab19
--- /dev/null
+++ b/engine/apps/user_management/models/__init__.py
@@ -0,0 +1,3 @@
+from .user import User # noqa: F401, isort: skip
+from .organization import Organization # noqa: F401
+from .team import Team # noqa: F401
diff --git a/engine/apps/user_management/models/organization.py b/engine/apps/user_management/models/organization.py
new file mode 100644
index 0000000000..aa22f58936
--- /dev/null
+++ b/engine/apps/user_management/models/organization.py
@@ -0,0 +1,266 @@
+import logging
+from urllib.parse import urljoin
+
+from django.apps import apps
+from django.conf import settings
+from django.core.validators import MinLengthValidator
+from django.db import models
+from mirage import fields as mirage_fields
+
+from apps.alerts.models import MaintainableObject
+from apps.alerts.tasks import disable_maintenance
+from apps.slack.utils import post_message_to_channel
+from apps.user_management.organization_log_creator import OrganizationLogType, create_organization_log
+from apps.user_management.subscription_strategy import FreePublicBetaSubscriptionStrategy
+from common.public_primary_keys import generate_public_primary_key, increase_public_primary_key_length
+
+logger = logging.getLogger(__name__)
+
+
+def generate_public_primary_key_for_organization():
+ prefix = "O"
+ new_public_primary_key = generate_public_primary_key(prefix)
+
+ failure_counter = 0
+ while Organization.objects.filter(public_primary_key=new_public_primary_key).exists():
+ new_public_primary_key = increase_public_primary_key_length(
+ failure_counter=failure_counter, prefix=prefix, model_name="Organization"
+ )
+ failure_counter += 1
+
+ return new_public_primary_key
+
+
+class Organization(MaintainableObject):
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+ self.subscription_strategy = self._get_subscription_strategy()
+
+ def _get_subscription_strategy(self):
+ if self.pricing_version == self.FREE_PUBLIC_BETA_PRICING:
+ return FreePublicBetaSubscriptionStrategy(self)
+
+ public_primary_key = models.CharField(
+ max_length=20,
+ validators=[MinLengthValidator(settings.PUBLIC_PRIMARY_KEY_MIN_LENGTH + 1)],
+ unique=True,
+ default=generate_public_primary_key_for_organization,
+ )
+
+ stack_id = models.PositiveIntegerField()
+ org_id = models.PositiveIntegerField()
+
+ stack_slug = models.CharField(max_length=300)
+ org_slug = models.CharField(max_length=300)
+ org_title = models.CharField(max_length=300)
+
+ grafana_url = models.URLField()
+
+ api_token = mirage_fields.EncryptedCharField(max_length=300)
+
+ (
+ API_TOKEN_STATUS_PENDING,
+ API_TOKEN_STATUS_OK,
+ API_TOKEN_STATUS_FAILED,
+ ) = range(3)
+ API_TOKEN_STATUS_CHOICES = (
+ (API_TOKEN_STATUS_PENDING, "API Token Status Pending"),
+ (API_TOKEN_STATUS_OK, "API Token Status Ok"),
+ (API_TOKEN_STATUS_FAILED, "API Token Status Failed"),
+ )
+ api_token_status = models.IntegerField(
+ choices=API_TOKEN_STATUS_CHOICES,
+ default=API_TOKEN_STATUS_PENDING,
+ )
+
+ gcom_token = mirage_fields.EncryptedCharField(max_length=300, null=True, default=None)
+ gcom_token_org_last_time_synced = models.DateTimeField(null=True, default=None)
+
+ last_time_synced = models.DateTimeField(null=True, default=None)
+
+ is_resolution_note_required = models.BooleanField(default=False)
+
+ archive_alerts_from = models.DateField(default="1970-01-01")
+
+ # TODO: this field is specific to slack and will be moved to a different model
+ slack_team_identity = models.ForeignKey(
+ "slack.SlackTeamIdentity", on_delete=models.PROTECT, null=True, default=None, related_name="organizations"
+ )
+
+ # Slack specific field with general log channel id
+ general_log_channel_id = models.CharField(max_length=100, null=True, default=None)
+
+ # Organization Settings configured from slack
+ (
+ ACKNOWLEDGE_REMIND_NEVER,
+ ACKNOWLEDGE_REMIND_1H,
+ ACKNOWLEDGE_REMIND_3H,
+ ACKNOWLEDGE_REMIND_5H,
+ ACKNOWLEDGE_REMIND_10H,
+ ) = range(5)
+ ACKNOWLEDGE_REMIND_CHOICES = (
+ (ACKNOWLEDGE_REMIND_NEVER, "Never remind about ack-ed incidents"),
+ (ACKNOWLEDGE_REMIND_1H, "Remind every 1 hour"),
+ (ACKNOWLEDGE_REMIND_3H, "Remind every 3 hours"),
+ (ACKNOWLEDGE_REMIND_5H, "Remind every 5 hours"),
+ (ACKNOWLEDGE_REMIND_10H, "Remind every 10 hours"),
+ )
+ ACKNOWLEDGE_REMIND_DELAY = {
+ ACKNOWLEDGE_REMIND_NEVER: 0,
+ ACKNOWLEDGE_REMIND_1H: 3600,
+ ACKNOWLEDGE_REMIND_3H: 10800,
+ ACKNOWLEDGE_REMIND_5H: 18000,
+ ACKNOWLEDGE_REMIND_10H: 36000,
+ }
+ acknowledge_remind_timeout = models.IntegerField(
+ choices=ACKNOWLEDGE_REMIND_CHOICES,
+ default=ACKNOWLEDGE_REMIND_NEVER,
+ )
+
+ (
+ UNACKNOWLEDGE_TIMEOUT_NEVER,
+ UNACKNOWLEDGE_TIMEOUT_5MIN,
+ UNACKNOWLEDGE_TIMEOUT_15MIN,
+ UNACKNOWLEDGE_TIMEOUT_30MIN,
+ UNACKNOWLEDGE_TIMEOUT_45MIN,
+ ) = range(5)
+
+ UNACKNOWLEDGE_TIMEOUT_CHOICES = (
+ (UNACKNOWLEDGE_TIMEOUT_NEVER, "and never unack"),
+ (UNACKNOWLEDGE_TIMEOUT_5MIN, "and unack in 5 min if no response"),
+ (UNACKNOWLEDGE_TIMEOUT_15MIN, "and unack in 15 min if no response"),
+ (UNACKNOWLEDGE_TIMEOUT_30MIN, "and unack in 30 min if no response"),
+ (UNACKNOWLEDGE_TIMEOUT_45MIN, "and unack in 45 min if no response"),
+ )
+ UNACKNOWLEDGE_TIMEOUT_DELAY = {
+ UNACKNOWLEDGE_TIMEOUT_NEVER: 0,
+ UNACKNOWLEDGE_TIMEOUT_5MIN: 300,
+ UNACKNOWLEDGE_TIMEOUT_15MIN: 900,
+ UNACKNOWLEDGE_TIMEOUT_30MIN: 1800,
+ UNACKNOWLEDGE_TIMEOUT_45MIN: 2700,
+ }
+ unacknowledge_timeout = models.IntegerField(
+ choices=UNACKNOWLEDGE_TIMEOUT_CHOICES,
+ default=UNACKNOWLEDGE_TIMEOUT_NEVER,
+ )
+
+ # This field is used to calculate public suggestions time
+ # Not sure if it is needed
+ datetime = models.DateTimeField(auto_now_add=True)
+
+ FREE_PUBLIC_BETA_PRICING = 0
+ PRICING_CHOICES = ((FREE_PUBLIC_BETA_PRICING, "Free public beta"),)
+ pricing_version = models.PositiveIntegerField(choices=PRICING_CHOICES, default=FREE_PUBLIC_BETA_PRICING)
+
+ is_amixr_migration_started = models.BooleanField(default=False)
+
+ class Meta:
+ unique_together = ("stack_id", "org_id")
+
+ def provision_plugin(self) -> dict:
+ PluginAuthToken = apps.get_model("auth_token", "PluginAuthToken")
+ _, token = PluginAuthToken.create_auth_token(organization=self)
+ return {
+ "pk": self.public_primary_key,
+ "jsonData": {
+ "stackId": self.stack_id,
+ "orgId": self.org_id,
+ "onCallApiUrl": settings.BASE_URL,
+ "license": settings.LICENSE,
+ },
+ "secureJsonData": {"onCallToken": token},
+ }
+
+ def revoke_plugin(self):
+ token_model = apps.get_model("auth_token", "PluginAuthToken")
+ token_model.objects.filter(organization=self).delete()
+
+ """
+    The following methods (start_disable_maintenance_task, force_disable_maintenance,
+    get_organization, get_team, get_verbal) implement the MaintainableObject interface.
+ """
+
+ def start_disable_maintenance_task(self, countdown):
+ maintenance_uuid = disable_maintenance.apply_async(
+ args=(),
+ kwargs={
+ "organization_id": self.pk,
+ },
+ countdown=countdown,
+ )
+ return maintenance_uuid
+
+ def force_disable_maintenance(self, user):
+ disable_maintenance(organization_id=self.pk, force=True, user_id=user.pk)
+
+ def get_organization(self):
+ return self
+
+ def get_team(self):
+ return None
+
+ def get_verbal(self):
+ return self.org_title
+
+ def notify_about_maintenance_action(self, text, send_to_general_log_channel=True):
+ if send_to_general_log_channel:
+ post_message_to_channel(self, self.general_log_channel_id, text)
+
+ """
+    The following methods (phone_calls_left, sms_left, emails_left,
+    notifications_limit_web_report) calculate notification limits by
+    delegating to self.subscription_strategy.
+ """
+
+ def phone_calls_left(self, user):
+ return self.subscription_strategy.phone_calls_left(user)
+
+ def sms_left(self, user):
+ return self.subscription_strategy.sms_left(user)
+
+ def emails_left(self, user):
+ return self.subscription_strategy.emails_left(user)
+
+ def notifications_limit_web_report(self, user):
+ return self.subscription_strategy.notifications_limit_web_report(user)
+
+ def set_general_log_channel(self, channel_id, channel_name, user):
+ if self.general_log_channel_id != channel_id:
+ old_general_log_channel_id = self.slack_team_identity.cached_channels.filter(
+ slack_id=self.general_log_channel_id
+ ).first()
+ old_channel_name = old_general_log_channel_id.name if old_general_log_channel_id else None
+ self.general_log_channel_id = channel_id
+ self.save(update_fields=["general_log_channel_id"])
+ description = (
+ f"The default channel for incidents in Slack changed "
+ f"{f'from #{old_channel_name} ' if old_channel_name else ''}to #{channel_name}"
+ )
+ create_organization_log(self, user, OrganizationLogType.TYPE_SLACK_DEFAULT_CHANNEL_CHANGED, description)
+
+ @property
+ def repr_settings_for_client_side_logging(self):
+ """
+ Example of execution:
+ # TODO: 770: check format
+ name: Test, archive alerts from date: 2019-10-24, require resolution note: No
+ acknowledge remind settings: Never remind about ack-ed incidents, and never unack
+ """
+ result = (
+ f"name: {self.org_title}, "
+ f"archive alerts from date: {self.archive_alerts_from.isoformat()}, "
+ f"require resolution note: {'Yes' if self.is_resolution_note_required else 'No'}"
+ )
+ if self.slack_team_identity:
+ result += (
+ f"\nacknowledge remind settings: {self.get_acknowledge_remind_timeout_display()}, "
+ f"{self.get_unacknowledge_timeout_display()}, "
+ )
+ return result
+
+ @property
+ def web_link(self):
+ return urljoin(self.grafana_url, "a/grafana-oncall-app/")
+
+ def __str__(self):
+ return f"{self.pk}: {self.org_title}"
diff --git a/engine/apps/user_management/models/team.py b/engine/apps/user_management/models/team.py
new file mode 100644
index 0000000000..15cbe62898
--- /dev/null
+++ b/engine/apps/user_management/models/team.py
@@ -0,0 +1,81 @@
+from django.conf import settings
+from django.core.validators import MinLengthValidator
+from django.db import models
+
+from common.public_primary_keys import generate_public_primary_key, increase_public_primary_key_length
+
+
+def generate_public_primary_key_for_team():
+ prefix = "T"
+ new_public_primary_key = generate_public_primary_key(prefix)
+
+ failure_counter = 0
+ while Team.objects.filter(public_primary_key=new_public_primary_key).exists():
+ new_public_primary_key = increase_public_primary_key_length(
+ failure_counter=failure_counter, prefix=prefix, model_name="Team"
+ )
+ failure_counter += 1
+
+ return new_public_primary_key
+
+
+class TeamManager(models.Manager):
+ @staticmethod
+ def sync_for_organization(organization, api_teams: list[dict]):
+ grafana_teams = {team["id"]: team for team in api_teams}
+ existing_team_ids = set(organization.teams.all().values_list("team_id", flat=True))
+
+ # create missing teams
+ teams_to_create = tuple(
+ Team(
+ organization_id=organization.pk,
+ team_id=team["id"],
+ name=team["name"],
+ email=team["email"],
+ avatar_url=team["avatarUrl"],
+ )
+ for team in grafana_teams.values()
+ if team["id"] not in existing_team_ids
+ )
+ organization.teams.bulk_create(teams_to_create, batch_size=5000)
+
+ # delete excess teams
+ team_ids_to_delete = existing_team_ids - grafana_teams.keys()
+ organization.teams.filter(team_id__in=team_ids_to_delete).delete()
+
+ # update existing teams if any fields have changed
+ teams_to_update = []
+ for team in organization.teams.filter(team_id__in=existing_team_ids):
+ grafana_team = grafana_teams[team.team_id]
+ if (
+ team.name != grafana_team["name"]
+ or team.email != grafana_team["email"]
+ or team.avatar_url != grafana_team["avatarUrl"]
+ ):
+ team.name = grafana_team["name"]
+ team.email = grafana_team["email"]
+ team.avatar_url = grafana_team["avatarUrl"]
+ teams_to_update.append(team)
+ organization.teams.bulk_update(teams_to_update, ["name", "email", "avatar_url"], batch_size=5000)
+
+
+class Team(models.Model):
+ public_primary_key = models.CharField(
+ max_length=20,
+ validators=[MinLengthValidator(settings.PUBLIC_PRIMARY_KEY_MIN_LENGTH + 1)],
+ unique=True,
+ default=generate_public_primary_key_for_team,
+ )
+
+ objects = TeamManager()
+
+ team_id = models.PositiveIntegerField()
+ organization = models.ForeignKey(
+ to="user_management.Organization",
+ related_name="teams",
+ on_delete=models.deletion.CASCADE,
+ )
+ users = models.ManyToManyField(to="user_management.User", related_name="teams")
+ name = models.CharField(max_length=300)
+ email = models.CharField(max_length=300, null=True, blank=True, default=None)
+ avatar_url = models.URLField()
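
TeamManager.sync_for_organization expects each team dict to carry "id", "name", "email" and "avatarUrl" keys, mirroring the Grafana teams API. A minimal sketch of a sync call with a hand-written payload (values are illustrative):

    from apps.user_management.models import Team

    def sync_teams_example(organization):
        api_teams = [
            {"id": 7, "name": "SRE", "email": "sre@example.org", "avatarUrl": "https://example.org/avatar.png"},
        ]
        # creates teams missing locally, deletes teams absent from the payload,
        # and bulk-updates teams whose name/email/avatar changed
        Team.objects.sync_for_organization(organization=organization, api_teams=api_teams)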
diff --git a/engine/apps/user_management/models/user.py b/engine/apps/user_management/models/user.py
new file mode 100644
index 0000000000..ecdc46e0e6
--- /dev/null
+++ b/engine/apps/user_management/models/user.py
@@ -0,0 +1,248 @@
+import logging
+
+from django.apps import apps
+from django.conf import settings
+from django.core.validators import MinLengthValidator
+from django.db import models
+from django.db.models.signals import post_save
+from django.dispatch import receiver
+from emoji import demojize
+
+from apps.alerts.tasks import invalidate_web_cache_for_alert_group
+from apps.schedules.tasks import drop_cached_ical_for_custom_events_for_organization
+from common.constants.role import Role
+from common.public_primary_keys import generate_public_primary_key, increase_public_primary_key_length
+
+logger = logging.getLogger(__name__)
+
+
+def generate_public_primary_key_for_user():
+ prefix = "U"
+ new_public_primary_key = generate_public_primary_key(prefix)
+
+ failure_counter = 0
+ while User.objects.filter(public_primary_key=new_public_primary_key).exists():
+ new_public_primary_key = increase_public_primary_key_length(
+ failure_counter=failure_counter, prefix=prefix, model_name="User"
+ )
+ failure_counter += 1
+
+ return new_public_primary_key
+
+
+class UserManager(models.Manager):
+ @staticmethod
+ def sync_for_team(team, api_members: list[dict]):
+ user_ids = tuple(member["userId"] for member in api_members)
+ users = team.organization.users.filter(user_id__in=user_ids)
+ team.users.set(users)
+
+ @staticmethod
+ def sync_for_organization(organization, api_users: list[dict]):
+ grafana_users = {user["userId"]: user for user in api_users}
+ existing_user_ids = set(organization.users.all().values_list("user_id", flat=True))
+
+ # create missing users
+ users_to_create = tuple(
+ User(
+ organization_id=organization.pk,
+ user_id=user["userId"],
+ email=user["email"],
+ name=user["name"],
+ username=user["login"],
+ role=Role[user["role"].upper()],
+ avatar_url=user["avatarUrl"],
+ )
+ for user in grafana_users.values()
+ if user["userId"] not in existing_user_ids
+ )
+ organization.users.bulk_create(users_to_create, batch_size=5000)
+
+ # delete excess users
+ user_ids_to_delete = existing_user_ids - grafana_users.keys()
+ organization.users.filter(user_id__in=user_ids_to_delete).delete()
+
+ # update existing users if any fields have changed
+ users_to_update = []
+ for user in organization.users.filter(user_id__in=existing_user_ids):
+ grafana_user = grafana_users[user.user_id]
+ g_user_role = Role[grafana_user["role"].upper()]
+ if (
+ user.email != grafana_user["email"]
+ or user.name != grafana_user["name"]
+ or user.username != grafana_user["login"]
+ or user.role != g_user_role
+ or user.avatar_url != grafana_user["avatarUrl"]
+ ):
+ user.email = grafana_user["email"]
+ user.name = grafana_user["name"]
+ user.username = grafana_user["login"]
+ user.role = g_user_role
+ user.avatar_url = grafana_user["avatarUrl"]
+ users_to_update.append(user)
+
+ organization.users.bulk_update(
+ users_to_update, ["email", "name", "username", "role", "avatar_url"], batch_size=5000
+ )
+
+
+class UserQuerySet(models.QuerySet):
+ def filter(self, *args, **kwargs):
+ return super().filter(*args, **kwargs, is_active=True)
+
+ def filter_with_deleted(self, *args, **kwargs):
+ return super().filter(*args, **kwargs)
+
+ def delete(self):
+ # is_active = None is used to be able to have multiple deleted users with the same user_id
+ return super().update(is_active=None)
+
+ def hard_delete(self):
+ return super().delete()
+
+
+class User(models.Model):
+ objects = UserManager.from_queryset(UserQuerySet)()
+
+ class Meta:
+        # For some reason there are cases when a Grafana user gets deleted
+        # and a new Grafana user is then created with the same user_id.
+        # Including is_active in unique_together and setting is_active to None on delete
+        # allows multiple deleted users with the same user_id while keeping user_id unique among active users.
+ unique_together = ("user_id", "organization", "is_active")
+
+ public_primary_key = models.CharField(
+ max_length=20,
+ validators=[MinLengthValidator(settings.PUBLIC_PRIMARY_KEY_MIN_LENGTH + 1)],
+ unique=True,
+ default=generate_public_primary_key_for_user,
+ )
+
+ user_id = models.PositiveIntegerField()
+ organization = models.ForeignKey(to="user_management.Organization", on_delete=models.CASCADE, related_name="users")
+ current_team = models.ForeignKey(to="user_management.Team", null=True, default=None, on_delete=models.SET_NULL)
+
+ email = models.EmailField()
+ name = models.CharField(max_length=300)
+ username = models.CharField(max_length=300)
+ role = models.PositiveSmallIntegerField(choices=Role.choices())
+ avatar_url = models.URLField()
+
+ notification = models.ManyToManyField("alerts.AlertGroup", through="alerts.UserHasNotification")
+
+ unverified_phone_number = models.CharField(max_length=20, null=True, default=None)
+ _verified_phone_number = models.CharField(max_length=20, null=True, default=None)
+
+ slack_user_identity = models.ForeignKey(
+ "slack.SlackUserIdentity", on_delete=models.PROTECT, null=True, default=None, related_name="users"
+ )
+
+ # is_active = None is used to be able to have multiple deleted users with the same user_id
+ is_active = models.BooleanField(null=True, default=True)
+
+ def __str__(self):
+ return f"{self.pk}: {self.username}"
+
+ @property
+ def is_authenticated(self):
+ return True
+
+ @property
+ def verified_phone_number(self):
+ """
+ Use property to highlight that _verified_phone_number should not be modified directly
+ """
+ return self._verified_phone_number
+
+ def save_verified_phone_number(self, phone_number: str) -> None:
+ self._verified_phone_number = phone_number
+ self.save(update_fields=["_verified_phone_number"])
+
+ def clear_phone_numbers(self) -> None:
+ self.unverified_phone_number = None
+ self._verified_phone_number = None
+ self.save(update_fields=["unverified_phone_number", "_verified_phone_number"])
+
+ # TODO: move to telegram app
+ def is_telegram_connected(self):
+ return hasattr(self, "telegram_connection")
+
+ def self_or_admin(self, user_to_check, organization) -> bool:
+ return user_to_check.pk == self.pk or (
+ user_to_check.role == Role.ADMIN and organization.pk == user_to_check.organization_id
+ )
+
+ @property
+ def is_notification_allowed(self):
+ return self.role in (Role.ADMIN, Role.EDITOR)
+
+ # using in-memory cache instead of redis to avoid pickling python objects
+ # @timed_lru_cache(timeout=100)
+ def get_user_verbal_for_team_for_slack(self, amixr_team=None, slack_team_identity=None, mention=False):
+ slack_verbal = None
+ verbal = self.username
+
+ if self.slack_user_identity:
+ slack_verbal = (
+ f"<@{self.slack_user_identity.slack_id}>"
+ if mention
+ else f"@{self.slack_user_identity.profile_display_name or self.slack_user_identity.slack_verbal}"
+ )
+
+ if slack_verbal:
+ slack_verbal_str = f" ({slack_verbal})"
+ verbal = f"{verbal}{slack_verbal_str}"
+
+ return verbal
+
+ @property
+ def repr_settings_for_client_side_logging(self):
+ """
+ Example of execution:
+ username: Alex, role: Admin, verified phone number: not added, unverified phone number: not added,
+ telegram connected: No,
+ notification policies: default: SMS - 5 min - :telephone:, important: :telephone:
+ """
+ UserNotificationPolicy = apps.get_model("base", "UserNotificationPolicy")
+
+ default, important = UserNotificationPolicy.get_short_verbals_for_user(user=self)
+ notification_policies_verbal = f"default: {' - '.join(default)}, important: {' - '.join(important)}"
+ notification_policies_verbal = demojize(notification_policies_verbal)
+
+ result = (
+ f"username: {self.username}, role: {self.get_role_display()}, "
+ f"verified phone number: "
+ f"{self.verified_phone_number if self.verified_phone_number else 'not added'}, "
+ f"unverified phone number: "
+ f"{self.unverified_phone_number if self.unverified_phone_number else 'not added'}, "
+ f"telegram connected: {'Yes' if self.is_telegram_connected else 'No'}"
+ f"\nnotification policies: {notification_policies_verbal}"
+ )
+ return result
+
+ @property
+ def timezone(self):
+ slack_user_identity = self.slack_user_identity
+ if slack_user_identity:
+ return slack_user_identity.timezone
+ else:
+ return None
+
+ def short(self):
+ return {"username": self.username, "pk": self.public_primary_key, "avatar": self.avatar_url}
+
+
+# TODO: check whether this signal can be moved to save method of the model
+@receiver(post_save, sender=User)
+def listen_for_user_model_save(sender, instance, created, *args, **kwargs):
+ # if kwargs is not None:
+ # if "update_fields" in kwargs:
+ # if kwargs["update_fields"] is not None:
+ # if "username" not in kwargs["update_fields"]:
+ # return
+
+ drop_cached_ical_for_custom_events_for_organization.apply_async(
+ (instance.organization_id,),
+ )
+ logger.info(f"Drop AG cache. Reason: save user {instance.pk}")
+ invalidate_web_cache_for_alert_group.apply_async(kwargs={"org_pk": instance.organization_id})
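
UserQuerySet above gives the model soft-delete semantics: delete() sets is_active to None (so several deleted rows may share a user_id under the unique_together constraint), filter() hides such rows, and filter_with_deleted() exposes them. A short sketch of the resulting behaviour, assuming an organization with some active users:

    from apps.user_management.models import User

    def soft_delete_example(organization):
        total = User.objects.filter_with_deleted(organization=organization).count()
        # delete() is overridden to update is_active=None instead of removing rows,
        # so the records survive but vanish from the default filter()
        organization.users.all().delete()
        assert User.objects.filter(organization=organization).count() == 0
        assert User.objects.filter_with_deleted(organization=organization).count() == total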
diff --git a/engine/apps/user_management/organization_log_creator/__init__.py b/engine/apps/user_management/organization_log_creator/__init__.py
new file mode 100644
index 0000000000..5ce5c89099
--- /dev/null
+++ b/engine/apps/user_management/organization_log_creator/__init__.py
@@ -0,0 +1,2 @@
+from .create_organization_log import create_organization_log # noqa: F401
+from .organization_log_type import OrganizationLogType # noqa: F401
diff --git a/engine/apps/user_management/organization_log_creator/create_organization_log.py b/engine/apps/user_management/organization_log_creator/create_organization_log.py
new file mode 100644
index 0000000000..5430e5e776
--- /dev/null
+++ b/engine/apps/user_management/organization_log_creator/create_organization_log.py
@@ -0,0 +1,11 @@
+from django.apps import apps
+
+
+def create_organization_log(organization, author, type, description):
+ OrganizationLogRecord = apps.get_model("base", "OrganizationLogRecord")
+ OrganizationLogRecord.objects.create(
+ organization=organization,
+ author=author,
+ type=type,
+ description=description,
+ )
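
create_organization_log resolves OrganizationLogRecord through apps.get_model at call time, which avoids importing the base app at module load. A minimal usage sketch, mirroring how Organization.set_general_log_channel calls it:

    from apps.user_management.organization_log_creator import OrganizationLogType, create_organization_log

    def log_settings_change(organization, user):
        create_organization_log(
            organization,
            user,
            OrganizationLogType.TYPE_ORGANIZATION_SETTINGS_CHANGED,
            "Organization settings changed",
        )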
diff --git a/engine/apps/user_management/organization_log_creator/organization_log_type.py b/engine/apps/user_management/organization_log_creator/organization_log_type.py
new file mode 100644
index 0000000000..5f1f05fb43
--- /dev/null
+++ b/engine/apps/user_management/organization_log_creator/organization_log_type.py
@@ -0,0 +1,52 @@
+class OrganizationLogType:
+ (
+ TYPE_SLACK_DEFAULT_CHANNEL_CHANGED,
+ TYPE_SLACK_WORKSPACE_CONNECTED,
+ TYPE_SLACK_WORKSPACE_DISCONNECTED,
+ TYPE_TELEGRAM_DEFAULT_CHANNEL_CHANGED,
+ TYPE_TELEGRAM_CHANNEL_CONNECTED,
+ TYPE_TELEGRAM_CHANNEL_DISCONNECTED,
+ TYPE_INTEGRATION_CREATED,
+ TYPE_INTEGRATION_DELETED,
+ TYPE_INTEGRATION_CHANGED,
+ TYPE_HEARTBEAT_CREATED,
+ TYPE_HEARTBEAT_CHANGED,
+ TYPE_CHANNEL_FILTER_CREATED,
+ TYPE_CHANNEL_FILTER_DELETED,
+ TYPE_CHANNEL_FILTER_CHANGED,
+ TYPE_ESCALATION_CHAIN_CREATED,
+ TYPE_ESCALATION_CHAIN_DELETED,
+ TYPE_ESCALATION_CHAIN_CHANGED,
+ TYPE_ESCALATION_STEP_CREATED,
+ TYPE_ESCALATION_STEP_DELETED,
+ TYPE_ESCALATION_STEP_CHANGED,
+ TYPE_MAINTENANCE_STARTED_FOR_ORGANIZATION,
+ TYPE_MAINTENANCE_STARTED_FOR_INTEGRATION,
+ TYPE_MAINTENANCE_STOPPED_FOR_ORGANIZATION,
+ TYPE_MAINTENANCE_STOPPED_FOR_INTEGRATION,
+ TYPE_MAINTENANCE_DEBUG_STARTED_FOR_ORGANIZATION,
+ TYPE_MAINTENANCE_DEBUG_STARTED_FOR_INTEGRATION,
+ TYPE_MAINTENANCE_DEBUG_STOPPED_FOR_ORGANIZATION,
+ TYPE_MAINTENANCE_DEBUG_STOPPED_FOR_INTEGRATION,
+ TYPE_CUSTOM_ACTION_CREATED,
+ TYPE_CUSTOM_ACTION_DELETED,
+ TYPE_CUSTOM_ACTION_CHANGED,
+ TYPE_SCHEDULE_CREATED,
+ TYPE_SCHEDULE_DELETED,
+ TYPE_SCHEDULE_CHANGED,
+ TYPE_ON_CALL_SHIFT_CREATED,
+ TYPE_ON_CALL_SHIFT_DELETED,
+ TYPE_ON_CALL_SHIFT_CHANGED,
+ TYPE_NEW_USER_ADDED,
+ TYPE_ORGANIZATION_SETTINGS_CHANGED,
+ TYPE_USER_SETTINGS_CHANGED,
+ TYPE_TELEGRAM_TO_USER_CONNECTED,
+ TYPE_TELEGRAM_FROM_USER_DISCONNECTED,
+ TYPE_API_TOKEN_CREATED,
+ TYPE_API_TOKEN_REVOKED,
+ TYPE_ESCALATION_CHAIN_COPIED,
+ TYPE_SCHEDULE_EXPORT_TOKEN_CREATED,
+ TYPE_MESSAGING_BACKEND_CHANNEL_CHANGED,
+ TYPE_MESSAGING_BACKEND_CHANNEL_DELETED,
+ TYPE_MESSAGING_BACKEND_USER_DISCONNECTED,
+ ) = range(49)
diff --git a/engine/apps/user_management/subscription_strategy/__init__.py b/engine/apps/user_management/subscription_strategy/__init__.py
new file mode 100644
index 0000000000..b3dc049d6e
--- /dev/null
+++ b/engine/apps/user_management/subscription_strategy/__init__.py
@@ -0,0 +1 @@
+from .free_public_beta_subscription_strategy import FreePublicBetaSubscriptionStrategy # noqa: F401
diff --git a/engine/apps/user_management/subscription_strategy/base_subscription_strategy.py b/engine/apps/user_management/subscription_strategy/base_subscription_strategy.py
new file mode 100644
index 0000000000..5d7c61dac4
--- /dev/null
+++ b/engine/apps/user_management/subscription_strategy/base_subscription_strategy.py
@@ -0,0 +1,22 @@
+from abc import ABC, abstractmethod
+
+
+class BaseSubscriptionStrategy(ABC):
+ def __init__(self, organization):
+ self.organization = organization
+
+ @abstractmethod
+ def notifications_limit_web_report(self, user):
+ raise NotImplementedError
+
+ @abstractmethod
+ def phone_calls_left(self, user):
+ raise NotImplementedError
+
+ @abstractmethod
+ def sms_left(self, user):
+ raise NotImplementedError
+
+ @abstractmethod
+ def emails_left(self, user):
+ raise NotImplementedError
diff --git a/engine/apps/user_management/subscription_strategy/free_public_beta_subscription_strategy.py b/engine/apps/user_management/subscription_strategy/free_public_beta_subscription_strategy.py
new file mode 100644
index 0000000000..32f9fa6980
--- /dev/null
+++ b/engine/apps/user_management/subscription_strategy/free_public_beta_subscription_strategy.py
@@ -0,0 +1,83 @@
+from datetime import datetime
+
+from django.apps import apps
+
+from .base_subscription_strategy import BaseSubscriptionStrategy
+
+
+class FreePublicBetaSubscriptionStrategy(BaseSubscriptionStrategy):
+ """
+    Subscription strategy for the public beta inside Grafana.
+    It is only responsible for limiting calls, SMS and emails; notifications are limited per user per day.
+    User management and its limits are handled on the Grafana side.
+ """
+
+ PHONE_NOTIFICATIONS_LIMIT = 200
+ EMAILS_LIMIT = 200
+
+ def phone_calls_left(self, user):
+ return self._calculate_phone_notifications_left(user)
+
+ def sms_left(self, user):
+ return self._calculate_phone_notifications_left(user)
+
+ def emails_left(self, user):
+ # Email notifications are disabled now.
+ EmailMessage = apps.get_model("sendgridapp", "EmailMessage")
+ now = datetime.now()
+ day_start = now.replace(hour=0, minute=0, second=0, microsecond=0)
+        emails_today = EmailMessage.objects.filter(
+            created_at__gte=day_start,
+            represents_alert_group__channel__organization=self.organization,
+            receiver=user,
+        ).count()
+        return self._emails_limit - emails_today
+
+ def notifications_limit_web_report(self, user):
+ limits_to_show = []
+ left = self._calculate_phone_notifications_left(user)
+ limit = self._phone_notifications_limit
+ limits_to_show.append({"limit_title": "Phone Calls & SMS", "total": limit, "left": left})
+ show_limits_warning = left <= limit * 0.2 # Show limit popup if less than 20% of notifications left
+
+ warning_text = (
+ f"You{'' if left == 0 else ' almost'} have exceeded the limit of phone calls and sms:"
+ f" {left} of {limit} left."
+ )
+
+ return {
+ "period_title": "Daily limit",
+ "limits_to_show": limits_to_show,
+ "show_limits_warning": show_limits_warning,
+ "warning_text": warning_text,
+ }
+
+ def _calculate_phone_notifications_left(self, user):
+ """
+        SMS and calls are counted together and share a common limit.
+        For FreePublicBetaSubscriptionStrategy, notifications are counted per day.
+ """
+ PhoneCall = apps.get_model("twilioapp", "PhoneCall")
+ SMSMessage = apps.get_model("twilioapp", "SMSMessage")
+ now = datetime.now()
+ day_start = now.replace(hour=0, minute=0, second=0, microsecond=0)
+ calls_today = PhoneCall.objects.filter(
+ created_at__gte=day_start,
+ represents_alert_group__channel__organization=self.organization,
+ receiver=user,
+ ).count()
+ sms_today = SMSMessage.objects.filter(
+ created_at__gte=day_start,
+ represents_alert_group__channel__organization=self.organization,
+ receiver=user,
+ ).count()
+
+ return self._phone_notifications_limit - calls_today - sms_today
+
+ @property
+ def _phone_notifications_limit(self):
+ return self.PHONE_NOTIFICATIONS_LIMIT
+
+ @property
+ def _emails_limit(self):
+ return self.EMAILS_LIMIT
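
notifications_limit_web_report feeds the limit warning in the web UI: show_limits_warning flips to True once 20% or less of the daily phone/SMS budget remains. A small sketch of a consumer (the function name is illustrative):

    def limits_banner(organization, user):
        report = organization.notifications_limit_web_report(user)
        if report["show_limits_warning"]:
            # e.g. "Daily limit: You have almost exceeded the limit of phone calls and SMS: 30 of 200 left."
            return f"{report['period_title']}: {report['warning_text']}"
        return None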
diff --git a/engine/apps/user_management/sync.py b/engine/apps/user_management/sync.py
new file mode 100644
index 0000000000..46f9cda179
--- /dev/null
+++ b/engine/apps/user_management/sync.py
@@ -0,0 +1,100 @@
+import logging
+
+from celery.utils.log import get_task_logger
+from django.utils import timezone
+from rest_framework import status
+
+from apps.grafana_plugin.helpers.client import GcomAPIClient, GrafanaAPIClient
+from apps.user_management.models import Organization, Team, User
+
+logger = get_task_logger(__name__)
+logger.setLevel(logging.DEBUG)
+
+
+def sync_organization(organization):
+ client = GrafanaAPIClient(api_url=organization.grafana_url, api_token=organization.api_token)
+
+ api_users, call_status = client.get_users()
+ status_code = call_status["status_code"]
+
+    # if the instance responds with 404, delete the organization in case its gcom stack has been deleted
+ if status_code == status.HTTP_404_NOT_FOUND:
+ is_deleted = delete_organization_if_needed(organization)
+ if is_deleted:
+ return
+
+ sync_instance_info(organization)
+
+ if api_users:
+ organization.api_token_status = Organization.API_TOKEN_STATUS_OK
+ sync_users_and_teams(client, api_users, organization)
+ else:
+ organization.api_token_status = Organization.API_TOKEN_STATUS_FAILED
+
+ organization.save(
+ update_fields=[
+ "stack_slug",
+ "org_slug",
+ "org_title",
+ "grafana_url",
+ "last_time_synced",
+ "api_token_status",
+ "gcom_token_org_last_time_synced",
+ ]
+ )
+
+
+def sync_instance_info(organization):
+ if organization.gcom_token:
+ gcom_client = GcomAPIClient(organization.gcom_token)
+ instance_info, _ = gcom_client.get_instance_info(organization.stack_id)
+ if not instance_info or str(instance_info["orgId"]) != organization.org_id:
+ return
+
+ organization.stack_slug = instance_info["slug"]
+ organization.org_slug = instance_info["orgSlug"]
+ organization.org_title = instance_info["orgName"]
+ organization.grafana_url = instance_info["url"]
+ organization.gcom_token_org_last_time_synced = timezone.now()
+
+
+def sync_users_and_teams(client, api_users, organization):
+    # check that api_users is shaped correctly, e.g. for a paused instance the response is not a list
+ if not api_users or not isinstance(api_users, (tuple, list)):
+ return
+
+ User.objects.sync_for_organization(organization=organization, api_users=api_users)
+
+ api_teams_result, _ = client.get_teams()
+ if not api_teams_result:
+ return
+
+ api_teams = api_teams_result["teams"]
+ Team.objects.sync_for_organization(organization=organization, api_teams=api_teams)
+
+ for team in organization.teams.all():
+ members, _ = client.get_team_members(team.team_id)
+ if not members:
+ continue
+ User.objects.sync_for_team(team=team, api_members=members)
+
+ organization.last_time_synced = timezone.now()
+
+
+def delete_organization_if_needed(organization):
+ if organization.gcom_token is None:
+ return False
+
+ gcom_client = GcomAPIClient(organization.gcom_token)
+ is_stack_deleted = gcom_client.is_stack_deleted(organization.stack_id)
+
+ if not is_stack_deleted:
+ return False
+
+ logger.info(
+ f"Deleting organization due to stack deletion. "
+ f"pk: {organization.pk}, stack_id: {organization.stack_id}, org_id: {organization.org_id}"
+ )
+ organization.delete()
+
+ return True
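
sync_organization is a plain function; a periodic wrapper around it might look like the sketch below (the task here is an assumption for illustration, the real scheduling is wired elsewhere):

    from celery import shared_task

    from apps.user_management.models import Organization
    from apps.user_management.sync import sync_organization

    @shared_task
    def sync_all_organizations():  # hypothetical task, for illustration only
        # refresh users and teams from the Grafana API, one organization at a time
        for organization in Organization.objects.all():
            sync_organization(organization)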
diff --git a/engine/apps/user_management/tests/__init__.py b/engine/apps/user_management/tests/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/engine/apps/user_management/tests/factories.py b/engine/apps/user_management/tests/factories.py
new file mode 100644
index 0000000000..79b20231df
--- /dev/null
+++ b/engine/apps/user_management/tests/factories.py
@@ -0,0 +1,33 @@
+import factory
+
+from apps.user_management.models import Organization, Team, User
+from common.utils import UniqueFaker
+
+
+class OrganizationFactory(factory.DjangoModelFactory):
+ org_title = factory.Faker("word")
+ stack_id = UniqueFaker("pyint")
+ org_id = UniqueFaker("pyint")
+
+ class Meta:
+ model = Organization
+
+
+class UserFactory(factory.DjangoModelFactory):
+ username = factory.Faker("user_name")
+ email = factory.Faker("email")
+ user_id = UniqueFaker("pyint")
+ avatar_url = factory.Faker("url")
+
+ class Meta:
+ model = User
+
+
+class TeamFactory(factory.DjangoModelFactory):
+ name = factory.Faker("user_name")
+ email = factory.Faker("email")
+ team_id = UniqueFaker("pyint")
+ avatar_url = factory.Faker("url")
+
+ class Meta:
+ model = Team
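
The factories leave relations unset, so callers pass organization and role explicitly (the make_* fixtures used in the tests below presumably wrap them). A minimal sketch:

    from apps.user_management.tests.factories import OrganizationFactory, UserFactory
    from common.constants.role import Role

    def make_admin():
        organization = OrganizationFactory()
        # UserFactory fills username/email/user_id/avatar_url;
        # organization and role must be supplied by the caller
        return UserFactory(organization=organization, role=Role.ADMIN)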
diff --git a/engine/apps/user_management/tests/test_free_public_beta_subscription_strategy.py b/engine/apps/user_management/tests/test_free_public_beta_subscription_strategy.py
new file mode 100644
index 0000000000..0f30957044
--- /dev/null
+++ b/engine/apps/user_management/tests/test_free_public_beta_subscription_strategy.py
@@ -0,0 +1,84 @@
+import sys
+
+import pytest
+
+from apps.sendgridapp.constants import SendgridEmailMessageStatuses
+from apps.twilioapp.constants import TwilioCallStatuses, TwilioMessageStatuses
+from common.constants.role import Role
+
+
+@pytest.mark.django_db
+def test_phone_calls_left(
+ make_organization,
+ make_user_for_organization,
+ make_phone_call,
+ make_alert_receive_channel,
+ make_alert_group,
+):
+ organization = make_organization()
+ admin = make_user_for_organization(organization, role=Role.ADMIN)
+ user = make_user_for_organization(organization, role=Role.EDITOR)
+ alert_receive_channel = make_alert_receive_channel(organization)
+ alert_group = make_alert_group(alert_receive_channel)
+ make_phone_call(receiver=admin, status=TwilioCallStatuses.COMPLETED, represents_alert_group=alert_group)
+
+ assert organization.phone_calls_left(admin) == organization.subscription_strategy._phone_notifications_limit - 1
+ assert organization.phone_calls_left(user) == organization.subscription_strategy._phone_notifications_limit
+
+
+@pytest.mark.django_db
+def test_sms_left(
+ make_organization, make_user_for_organization, make_sms, make_alert_receive_channel, make_alert_group
+):
+ organization = make_organization()
+ admin = make_user_for_organization(organization, role=Role.ADMIN)
+ user = make_user_for_organization(organization, role=Role.EDITOR)
+ alert_receive_channel = make_alert_receive_channel(organization)
+ alert_group = make_alert_group(alert_receive_channel)
+ make_sms(receiver=admin, status=TwilioMessageStatuses.SENT, represents_alert_group=alert_group)
+
+ assert organization.sms_left(admin) == organization.subscription_strategy._phone_notifications_limit - 1
+ assert organization.sms_left(user) == organization.subscription_strategy._phone_notifications_limit
+
+
+@pytest.mark.django_db
+def test_phone_calls_and_sms_counts_together(
+ make_organization,
+ make_user_for_organization,
+ make_phone_call,
+ make_sms,
+ make_alert_receive_channel,
+ make_alert_group,
+):
+ organization = make_organization()
+ admin = make_user_for_organization(organization, role=Role.ADMIN)
+ user = make_user_for_organization(organization, role=Role.EDITOR)
+ alert_receive_channel = make_alert_receive_channel(organization)
+ alert_group = make_alert_group(alert_receive_channel)
+ make_phone_call(receiver=admin, status=TwilioCallStatuses.COMPLETED, represents_alert_group=alert_group)
+ make_sms(receiver=admin, status=TwilioMessageStatuses.SENT, represents_alert_group=alert_group)
+
+ assert organization.phone_calls_left(admin) == organization.subscription_strategy._phone_notifications_limit - 2
+ assert organization.sms_left(admin) == organization.subscription_strategy._phone_notifications_limit - 2
+
+ assert organization.phone_calls_left(user) == organization.subscription_strategy._phone_notifications_limit
+ assert organization.sms_left(user) == organization.subscription_strategy._phone_notifications_limit
+
+
+@pytest.mark.skip(reason="email disabled")
+@pytest.mark.django_db
+def test_emails_left(
+ make_organization,
+ make_user_for_organization,
+ make_email_message,
+ make_alert_receive_channel,
+ make_alert_group,
+):
+ organization = make_organization()
+ admin = make_user_for_organization(organization, role=Role.ADMIN)
+ alert_receive_channel = make_alert_receive_channel(organization)
+ alert_group = make_alert_group(alert_receive_channel)
+ make_email_message(
+ receiver=admin, status=SendgridEmailMessageStatuses.DELIVERED, represents_alert_group=alert_group
+    )
+ assert organization.emails_left(admin) == sys.maxsize
diff --git a/engine/apps/user_management/tests/test_organization.py b/engine/apps/user_management/tests/test_organization.py
new file mode 100644
index 0000000000..83f342e64b
--- /dev/null
+++ b/engine/apps/user_management/tests/test_organization.py
@@ -0,0 +1,169 @@
+import pytest
+from django.core.exceptions import ObjectDoesNotExist
+from django.utils import timezone
+
+from apps.alerts.models import AlertGroupLogRecord, EscalationPolicy
+from apps.base.models import UserNotificationPolicy, UserNotificationPolicyLogRecord
+from apps.schedules.models import OnCallScheduleCalendar
+from apps.telegram.models import TelegramMessage
+from apps.twilioapp.constants import TwilioCallStatuses, TwilioMessageStatuses
+
+
+@pytest.mark.django_db
+def test_organization_delete(
+ make_organization,
+ make_user,
+ make_team,
+ make_slack_team_identity,
+ make_slack_user_identity,
+ make_slack_message,
+ make_slack_action_record,
+ make_schedule,
+ make_custom_action,
+ make_alert_receive_channel,
+ make_escalation_chain,
+ make_escalation_policy,
+ make_channel_filter,
+ make_organization_log_record,
+ make_user_notification_policy,
+ make_telegram_user_connector,
+ make_telegram_channel,
+ make_telegram_verification_code,
+ make_telegram_channel_verification_code,
+ make_telegram_message,
+ make_alert,
+ make_alert_group,
+ make_alert_group_log_record,
+ make_user_notification_policy_log_record,
+ make_sms,
+ make_phone_call,
+ make_token_for_organization,
+ make_public_api_token,
+ make_invitation,
+ make_resolution_note,
+ make_resolution_note_slack_message,
+):
+ slack_team_identity = make_slack_team_identity()
+ organization = make_organization(slack_team_identity=slack_team_identity)
+
+ slack_user_identity_1 = make_slack_user_identity(slack_team_identity=slack_team_identity, slack_id="USER_1")
+ slack_user_identity_2 = make_slack_user_identity(slack_team_identity=slack_team_identity, slack_id="USER_2")
+
+ user_1 = make_user(organization=organization, slack_user_identity=slack_user_identity_1)
+ user_2 = make_user(organization=organization, slack_user_identity=slack_user_identity_2)
+
+ user_notification_policy = make_user_notification_policy(
+ user=user_1, step=UserNotificationPolicy.Step.WAIT, wait_delay=timezone.timedelta(minutes=15), important=False
+ )
+
+ team = make_team(organization=organization)
+ team.users.add(user_1)
+
+ schedule = make_schedule(organization=organization, schedule_class=OnCallScheduleCalendar)
+ custom_action = make_custom_action(organization=organization)
+
+ escalation_chain = make_escalation_chain(organization=organization)
+ escalation_policy = make_escalation_policy(
+ escalation_chain=escalation_chain,
+ escalation_policy_step=EscalationPolicy.STEP_WAIT,
+ wait_delay=EscalationPolicy.ONE_MINUTE,
+ last_notified_user=user_1,
+ )
+ escalation_policy.notify_to_users_queue.set([user_1, user_2])
+
+ alert_receive_channel = make_alert_receive_channel(organization=organization, author=user_1)
+ channel_filter = make_channel_filter(alert_receive_channel, is_default=True, escalation_chain=escalation_chain)
+
+ organization_log_record = make_organization_log_record(organization=organization, user=user_1)
+
+ alert_group = make_alert_group(
+ alert_receive_channel=alert_receive_channel,
+ acknowledged_by_user=user_1,
+ silenced_by_user=user_2,
+ wiped_by=user_2,
+ )
+
+ alert = make_alert(alert_group=alert_group, raw_request_data={})
+ alert_group.resolved_by_alert = alert
+ alert_group.save(update_fields=["resolved_by_alert"])
+
+ user_notification_policy_log_record = make_user_notification_policy_log_record(
+ author=user_1,
+ type=UserNotificationPolicyLogRecord.TYPE_PERSONAL_NOTIFICATION_FAILED,
+ notification_policy=user_notification_policy,
+ notification_step=user_notification_policy.step,
+ notification_channel=user_notification_policy.notify_by,
+ alert_group=alert_group,
+ )
+
+ sms = make_sms(
+ receiver=user_1, status=TwilioMessageStatuses.SENT, represents_alert=alert, represents_alert_group=alert_group
+ )
+
+ phone_call = make_phone_call(
+ receiver=user_1, status=TwilioCallStatuses.COMPLETED, represents_alert=alert, represents_alert_group=alert_group
+ )
+
+ telegram_user_connector = make_telegram_user_connector(user=user_1)
+ telegram_channel = make_telegram_channel(organization=organization)
+ telegram_verification_code = make_telegram_verification_code(user=user_1)
+ telegram_channel_verification_code = make_telegram_channel_verification_code(
+ organization=organization, author=user_1
+ )
+ telegram_message = make_telegram_message(alert_group=alert_group, message_type=TelegramMessage.ALERT_GROUP_MESSAGE)
+
+ slack_message = make_slack_message(alert_group=alert_group)
+ slack_action_record = make_slack_action_record(organization=organization, user=user_1)
+
+ plugin_token, _ = make_token_for_organization(organization)
+ public_api_token, _ = make_public_api_token(user_1, organization)
+
+ invitation = make_invitation(alert_group=alert_group, author=user_1, invitee=user_2)
+
+ alert_group_log_record = make_alert_group_log_record(
+ alert_group=alert_group, author=user_1, type=AlertGroupLogRecord.TYPE_ACK, invitation=invitation
+ )
+
+ resolution_note_slack_message = make_resolution_note_slack_message(
+ alert_group=alert_group, user=user_1, added_by_user=user_2
+ )
+ resolution_note = make_resolution_note(
+ alert_group=alert_group, author=user_1, resolution_note_slack_message=resolution_note_slack_message
+ )
+
+ cascading_objects = [
+ user_1,
+ user_2,
+ team,
+ user_notification_policy,
+ schedule,
+ custom_action,
+ escalation_chain,
+ escalation_policy,
+ alert_receive_channel,
+ channel_filter,
+ organization_log_record,
+ alert_group,
+ alert,
+ alert_group_log_record,
+ user_notification_policy_log_record,
+ phone_call,
+ sms,
+ telegram_message,
+ telegram_user_connector,
+ telegram_channel,
+ telegram_verification_code,
+ telegram_channel_verification_code,
+ slack_message,
+ slack_action_record,
+ plugin_token,
+ public_api_token,
+ invitation,
+ resolution_note,
+ resolution_note_slack_message,
+ ]
+
+ organization.delete()
+ for obj in cascading_objects:
+ with pytest.raises(ObjectDoesNotExist):
+ obj.refresh_from_db()
diff --git a/engine/apps/user_management/tests/test_sync.py b/engine/apps/user_management/tests/test_sync.py
new file mode 100644
index 0000000000..f7a31e7d2e
--- /dev/null
+++ b/engine/apps/user_management/tests/test_sync.py
@@ -0,0 +1,198 @@
+from unittest.mock import patch
+
+import pytest
+from django.core.exceptions import ObjectDoesNotExist
+
+from apps.grafana_plugin.helpers.client import GcomAPIClient, GrafanaAPIClient
+from apps.user_management.models import Team, User
+from apps.user_management.sync import sync_organization
+
+
+@pytest.mark.django_db
+def test_sync_users_for_organization(make_organization, make_user_for_organization):
+ organization = make_organization()
+ users = tuple(make_user_for_organization(organization, user_id=user_id) for user_id in (1, 2))
+
+ api_users = tuple(
+ {
+ "userId": user_id,
+ "email": "test@test.test",
+ "name": "Test",
+ "login": "test",
+ "role": "admin",
+ "avatarUrl": "test.test/test",
+ }
+ for user_id in (2, 3)
+ )
+
+ User.objects.sync_for_organization(organization, api_users=api_users)
+
+ assert organization.users.count() == 2
+
+ # check that excess users are deleted
+ assert not organization.users.filter(pk=users[0].pk).exists()
+
+ # check that existing users are updated
+ updated_user = organization.users.filter(pk=users[1].pk).first()
+ assert updated_user is not None
+ assert updated_user.name == api_users[0]["name"]
+ assert updated_user.email == api_users[0]["email"]
+
+ # check that missing users are created
+ created_user = organization.users.filter(user_id=api_users[1]["userId"]).first()
+ assert created_user is not None
+ assert created_user.user_id == api_users[1]["userId"]
+ assert created_user.name == api_users[1]["name"]
+
+
+@pytest.mark.django_db
+def test_sync_teams_for_organization(make_organization, make_team):
+ organization = make_organization()
+ teams = tuple(make_team(organization, team_id=team_id) for team_id in (1, 2))
+
+ api_teams = tuple(
+ {"id": team_id, "name": "Test", "email": "test@test.test", "avatarUrl": "test.test/test"} for team_id in (2, 3)
+ )
+
+ Team.objects.sync_for_organization(organization, api_teams=api_teams)
+
+ assert organization.teams.count() == 2
+
+ # check that excess teams are deleted
+ assert not organization.teams.filter(pk=teams[0].pk).exists()
+
+ # check that existing teams are updated
+ updated_team = organization.teams.filter(pk=teams[1].pk).first()
+ assert updated_team is not None
+ assert updated_team.name == api_teams[0]["name"]
+ assert updated_team.email == api_teams[0]["email"]
+
+ # check that missing teams are created
+ created_team = organization.teams.filter(team_id=api_teams[1]["id"]).first()
+ assert created_team is not None
+ assert created_team.team_id == api_teams[1]["id"]
+ assert created_team.name == api_teams[1]["name"]
+
+
+@pytest.mark.django_db
+def test_sync_users_for_team(make_organization, make_user_for_organization, make_team):
+ organization = make_organization()
+ team = make_team(organization)
+ users = tuple(make_user_for_organization(organization) for _ in range(2))
+
+ api_members = (
+ {
+ "orgId": organization.org_id,
+ "teamId": team.team_id,
+ "userId": users[0].user_id,
+ },
+ )
+
+ User.objects.sync_for_team(team, api_members=api_members)
+
+ assert team.users.count() == 1
+ assert team.users.get() == users[0]
+
+
+@pytest.mark.django_db
+def test_sync_organization(
+ make_organization,
+ make_team,
+ make_user_for_organization,
+):
+ organization = make_organization()
+
+ api_users_response = (
+ {
+ "userId": 1,
+ "email": "test@test.test",
+ "name": "Test",
+ "login": "test",
+ "role": "admin",
+ "avatarUrl": "test.test/test",
+ },
+ )
+
+ api_teams_response = {
+ "totalCount": 1,
+ "teams": (
+ {
+ "id": 1,
+ "name": "Test",
+ "email": "test@test.test",
+ "avatarUrl": "test.test/test",
+ },
+ ),
+ }
+
+ api_members_response = (
+ {
+ "orgId": organization.org_id,
+ "teamId": 1,
+ "userId": 1,
+ },
+ )
+
+ with patch.object(GrafanaAPIClient, "get_users", return_value=(api_users_response, {"status_code": 200})):
+ with patch.object(GrafanaAPIClient, "get_teams", return_value=(api_teams_response, None)):
+ with patch.object(GrafanaAPIClient, "get_team_members", return_value=(api_members_response, None)):
+ sync_organization(organization)
+
+ # check that users are populated
+ assert organization.users.count() == 1
+ user = organization.users.get()
+ assert user.user_id == 1
+
+ # check that teams are populated
+ assert organization.teams.count() == 1
+ team = organization.teams.get()
+ assert team.team_id == 1
+
+ # check that team members are populated
+ assert team.users.count() == 1
+ assert team.users.get() == user
+
+
+@pytest.mark.django_db
+def test_duplicate_user_ids(make_organization, make_user_for_organization):
+ organization = make_organization()
+
+ user = make_user_for_organization(organization, user_id=1)
+ api_users = []
+
+ User.objects.sync_for_organization(organization, api_users=api_users)
+
+ user.refresh_from_db()
+
+ assert user.is_active is None
+ assert organization.users.count() == 0
+ assert User.objects.filter_with_deleted().count() == 1
+
+ api_users = [
+ {
+ "userId": 1,
+ "email": "newtest@test.test",
+ "name": "New Test",
+ "login": "test",
+ "role": "admin",
+ "avatarUrl": "test.test/test",
+ }
+ ]
+
+ User.objects.sync_for_organization(organization, api_users=api_users)
+
+ assert organization.users.count() == 1
+ assert organization.users.get().email == "newtest@test.test"
+ assert User.objects.filter_with_deleted().count() == 2
+
+
+@pytest.mark.django_db
+def test_sync_organization_deleted(make_organization):
+ organization = make_organization(gcom_token="TEST_GCOM_TOKEN")
+
+ with patch.object(GrafanaAPIClient, "get_users", return_value=(None, {"status_code": 404})):
+ with patch.object(GcomAPIClient, "get_instance_info", return_value=({"status": "deleted"}, None)):
+ sync_organization(organization)
+
+ with pytest.raises(ObjectDoesNotExist):
+ organization.refresh_from_db()
diff --git a/engine/apps/user_management/tests/test_user.py b/engine/apps/user_management/tests/test_user.py
new file mode 100644
index 0000000000..fe615c7c96
--- /dev/null
+++ b/engine/apps/user_management/tests/test_user.py
@@ -0,0 +1,24 @@
+# from unittest.mock import Mock, patch
+
+import pytest
+
+from common.constants.role import Role
+
+
+@pytest.mark.django_db
+def test_self_or_admin(
+ make_organization,
+ make_user_for_organization,
+):
+ organization = make_organization()
+ admin = make_user_for_organization(organization)
+ second_admin = make_user_for_organization(organization)
+ editor = make_user_for_organization(organization, role=Role.EDITOR)
+
+ another_organization = make_organization()
+ admin_from_another_organization = make_user_for_organization(another_organization)
+
+ assert admin.self_or_admin(admin, organization) is True
+ assert admin.self_or_admin(editor, organization) is False
+ assert admin.self_or_admin(second_admin, organization) is True
+ assert admin.self_or_admin(admin_from_another_organization, organization) is False
diff --git a/engine/apps/user_management/user_representative.py b/engine/apps/user_management/user_representative.py
new file mode 100644
index 0000000000..229e4c9b7b
--- /dev/null
+++ b/engine/apps/user_management/user_representative.py
@@ -0,0 +1,16 @@
+from abc import ABC, abstractmethod
+
+from django.apps import apps
+
+
+class UserAbstractRepresentative(ABC):
+ HANDLER_PREFIX = "on_"
+
+ @abstractmethod
+ def is_applicable(self):
+ return None
+
+ @staticmethod
+ def get_handlers_map():
+ UserNotificationPolicyLogRecord = apps.get_model("base", "UserNotificationPolicyLogRecord")
+ return UserNotificationPolicyLogRecord.TYPE_TO_HANDLERS_MAP
diff --git a/engine/celery_with_exporter.sh b/engine/celery_with_exporter.sh
new file mode 100755
index 0000000000..360cc86437
--- /dev/null
+++ b/engine/celery_with_exporter.sh
@@ -0,0 +1,40 @@
+#!/bin/bash
+
+set -x
+
+# If the $CELERY_WORKER_SHUTDOWN_INTERVAL env variable is set,
+# start a background process that shuts celery down after $CELERY_WORKER_SHUTDOWN_INTERVAL
+if [ -n "$CELERY_WORKER_SHUTDOWN_INTERVAL" ]; then
+ sleep $CELERY_WORKER_SHUTDOWN_INTERVAL && celery -A engine control shutdown &
+fi
+
+# Validating required parameters
+if [ -z "$CELERY_WORKER_QUEUE" ]; then
+ echo "CELERY_WORKER_QUEUE is not set"
+ exit 1
+fi
+
+if [ -z "$CELERY_WORKER_CONCURRENCY" ]; then
+ echo "CELERY_WORKER_CONCURRENCY is not set"
+ exit 1
+fi
+
+if [ -z "$CELERY_WORKER_MAX_TASKS_PER_CHILD" ]; then
+ echo "CELERY_WORKER_MAX_TASKS_PER_CHILD is not set"
+ exit 1
+fi
+
+CELERY_ARGS=(
+ "-A" "engine"
+ "worker"
+ "-l" "info"
+ "--quiet" # --quite parameter removes pointless banner when celery starts
+ "--concurrency=$CELERY_WORKER_CONCURRENCY"
+ "--max-tasks-per-child=$CELERY_WORKER_MAX_TASKS_PER_CHILD"
+ "-Q" "$CELERY_WORKER_QUEUE"
+)
+if [[ $CELERY_WORKER_BEAT_ENABLED = True ]]; then
+ CELERY_ARGS+=("--beat")
+fi
+
+celery "${CELERY_ARGS[@]}"
diff --git a/engine/common/__init__.py b/engine/common/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/engine/common/admin.py b/engine/common/admin.py
new file mode 100644
index 0000000000..70b1c75837
--- /dev/null
+++ b/engine/common/admin.py
@@ -0,0 +1,61 @@
+from django.contrib import admin
+from django.core.exceptions import FieldDoesNotExist
+from django.db.models import ForeignKey, Model
+
+
+class RawForeignKeysMixin:
+ model: Model
+
+ @property
+ def raw_id_fields(self) -> tuple[str]:
+ fields = self.model._meta.fields
+ fk_field_names = tuple(str(field.name) for field in fields if isinstance(field, ForeignKey))
+
+ return fk_field_names
+
+
+class SearchableByIdsMixin:
+ model: Model
+
+ @property
+ def search_fields(self) -> tuple[str]:
+ search_fields = (
+ "id",
+ "public_primary_key",
+ )
+
+ existing_fields = []
+
+ for field in search_fields:
+ try:
+ self.model._meta.get_field(field)
+ except FieldDoesNotExist:
+ continue
+
+ existing_fields.append(field)
+
+ return tuple(existing_fields)
+
+
+class SelectRelatedMixin:
+ model: Model
+ list_display: tuple[str]
+
+ @property
+ def list_select_related(self) -> tuple[str]:
+ fk_field_names = []
+
+ for field_name in self.list_display:
+ try:
+ field = self.model._meta.get_field(field_name)
+ except FieldDoesNotExist:
+ continue
+
+ if isinstance(field, ForeignKey):
+ fk_field_names.append(str(field.name))
+
+ return tuple(fk_field_names)
+
+
+class CustomModelAdmin(SearchableByIdsMixin, RawForeignKeysMixin, SelectRelatedMixin, admin.ModelAdmin):
+ pass
diff --git a/engine/common/api_helpers/__init__.py b/engine/common/api_helpers/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/engine/common/api_helpers/custom_fields.py b/engine/common/api_helpers/custom_fields.py
new file mode 100644
index 0000000000..597aff5251
--- /dev/null
+++ b/engine/common/api_helpers/custom_fields.py
@@ -0,0 +1,173 @@
+import time
+
+from django.core.exceptions import ObjectDoesNotExist
+from rest_framework import fields, serializers
+from rest_framework.exceptions import ValidationError
+from rest_framework.relations import RelatedField
+
+from apps.alerts.models import ChannelFilter
+from apps.user_management.models import User
+from common.api_helpers.exceptions import BadRequest
+
+
+class OrganizationFilteredPrimaryKeyRelatedField(RelatedField):
+ """
+ This field is used to filter entities by organization
+ """
+
+ def __init__(self, **kwargs):
+ self.filter_field = kwargs.pop("filter_field", "organization")
+ self.display_func = kwargs.pop("display_func", lambda instance: str(instance))
+ super().__init__(**kwargs)
+
+ def to_representation(self, value):
+ return value.public_primary_key
+
+ def to_internal_value(self, data):
+ try:
+ return self.get_queryset().get(public_primary_key=data)
+ except ObjectDoesNotExist:
+ raise ValidationError("Object does not exist")
+ except (TypeError, ValueError):
+ raise ValidationError("Invalid values")
+
+ def get_queryset(self):
+ request = self.context.get("request", None)
+ queryset = self.queryset
+ if not request or not queryset:
+ return None
+ filter_kwargs = {self.filter_field: request.auth.organization}
+ return queryset.filter(**filter_kwargs).distinct()
+
+ def display_value(self, instance):
+ return self.display_func(instance)
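+
+    # A minimal usage sketch (model and field names hypothetical): declare the field
+    # with a queryset, and lookups are scoped to request.auth.organization automatically:
+    #
+    #     escalation_chain = OrganizationFilteredPrimaryKeyRelatedField(
+    #         queryset=EscalationChain.objects,
+    #     )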
+
+
+class TeamPrimaryKeyRelatedField(RelatedField):
+ """
+ This field is used to get user teams
+ """
+
+ def __init__(self, **kwargs):
+ self.display_func = kwargs.pop("display_func", lambda instance: str(instance))
+ super().__init__(**kwargs)
+
+ def to_representation(self, value):
+ return value.public_primary_key
+
+ def to_internal_value(self, data):
+ try:
+ return self.get_queryset().get(public_primary_key=data)
+ except ObjectDoesNotExist:
+ raise ValidationError("Object does not exist")
+ except (TypeError, ValueError):
+ raise ValidationError("Invalid values")
+
+ def get_queryset(self):
+ request = self.context.get("request", None)
+ if not request:
+ return None
+ return request.user.teams.all()
+
+ def display_value(self, instance):
+ return self.display_func(instance)
+
+
+class UsersFilteredByOrganizationField(serializers.Field):
+ """
+    This field reduces the query count when accessing a User many-related field (e.g. notify_to_users_queue).
+ Check if you can use OrganizationFilteredPrimaryKeyRelatedField before using this one.
+ """
+
+ def __init__(self, **kwargs):
+ self.queryset = kwargs.pop("queryset", None)
+ super().__init__(**kwargs)
+
+ def to_representation(self, value):
+ return list(map(lambda v: v.public_primary_key, value.all()))
+
+ def to_internal_value(self, data):
+ queryset = self.queryset
+ request = self.context.get("request", None)
+
+ if not request or not queryset:
+ return None
+
+ return queryset.filter(organization=request.user.organization, public_primary_key__in=data).distinct()
+
+
+class WritableSerializerMethodField(serializers.SerializerMethodField):
+ """
+    Please, NEVER use this field.
+    Creating it was a mistake: it required digging deep into DRF to fix bugs there.
+    This field is a workaround that allows writing into a SerializerMethodField.
+ """
+
+ def __init__(self, method_name=None, **kwargs):
+ self.method_name = method_name
+ self.setter_method_name = kwargs.pop("setter_method_name", None)
+ self.deserializer_field = kwargs.pop("deserializer_field")
+
+ kwargs["source"] = "*"
+ super(serializers.SerializerMethodField, self).__init__(**kwargs)
+
+ def bind(self, field_name, parent):
+ retval = super().bind(field_name, parent)
+ if not self.setter_method_name:
+ self.setter_method_name = f"set_{field_name}"
+
+ return retval
+
+ def to_internal_value(self, data):
+ value = self.deserializer_field.to_internal_value(data)
+ method = getattr(self.parent, self.setter_method_name)
+ method(value)
+ return {self.method_name: value}
+
+
+class CustomTimeField(fields.TimeField):
+ def to_representation(self, value):
+ result = super().to_representation(value)
+ if result[-1] != "Z":
+ result += "Z"
+ return result
+
+ def to_internal_value(self, data):
+ TIME_FORMAT_LEN = len("00:00:00Z")
+ if len(data) == TIME_FORMAT_LEN:
+ try:
+ time.strptime(data, "%H:%M:%SZ")
+ except ValueError:
+ raise BadRequest(detail="Invalid time format, should be '00:00:00Z'")
+ else:
+ raise BadRequest(detail="Invalid time format, should be '00:00:00Z'")
+ return data
+
+
+class RouteIdField(fields.CharField):
+ def to_internal_value(self, data):
+ try:
+ channel_filter = ChannelFilter.objects.get(public_primary_key=data)
+ except ChannelFilter.DoesNotExist:
+ raise BadRequest(detail="Route does not exist")
+ return channel_filter
+
+ def to_representation(self, value):
+ if value is not None:
+ return value.public_primary_key
+ return value
+
+
+class UserIdField(fields.CharField):
+ def to_internal_value(self, data):
+ request = self.context.get("request", None)
+
+ user = User.objects.filter(organization=request.auth.organization, public_primary_key=data).first()
+ if user is None:
+ raise BadRequest(detail="User does not exist")
+ return user
+
+ def to_representation(self, value):
+ if value is not None:
+ return value.public_primary_key
+ return value
diff --git a/engine/common/api_helpers/exceptions.py b/engine/common/api_helpers/exceptions.py
new file mode 100644
index 0000000000..e0cc1500a9
--- /dev/null
+++ b/engine/common/api_helpers/exceptions.py
@@ -0,0 +1,25 @@
+from rest_framework.exceptions import APIException
+
+
+class BadRequest(APIException):
+ status_code = 400
+ default_detail = "Your browser sent a request that this server could not understand"
+ default_code = "bad_request"
+
+
+class Unauthorized(APIException):
+ status_code = 401
+ default_detail = "Request could not be authenticated"
+ default_code = "Unauthorized"
+
+
+class Forbidden(APIException):
+ status_code = 403
+ default_detail = "You do not have permission to perform this action"
+ default_code = "Forbidden"
+
+
+class Conflict(APIException):
+ status_code = 409
+ default_detail = "duplicate record found"
+ default_code = "Conflict"
diff --git a/engine/common/api_helpers/filters.py b/engine/common/api_helpers/filters.py
new file mode 100644
index 0000000000..ab5f9117d7
--- /dev/null
+++ b/engine/common/api_helpers/filters.py
@@ -0,0 +1,88 @@
+from datetime import datetime
+
+from django_filters import rest_framework as filters
+from django_filters.utils import handle_timezone
+
+from apps.user_management.models import Team
+from common.api_helpers.exceptions import BadRequest
+
+
+class DateRangeFilterMixin:
+ DATE_FORMAT = "%Y-%m-%dT%H:%M:%S"
+
+ def filter_date_range(self, queryset, name, value):
+ start_time, end_time = self.parse_custom_datetime_range(value)
+
+ filter_kwargs = {}
+ if start_time:
+ filter_kwargs[f"{name}__gte"] = start_time
+ if end_time:
+ filter_kwargs[f"{name}__lte"] = end_time
+ return queryset.filter(**filter_kwargs)
+
+ @classmethod
+ def parse_custom_datetime_range(cls, value):
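+        # The expected wire format is two datetimes joined by "/", e.g.
+        #     "2022-01-01T00:00:00/2022-01-02T12:00:00"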
+ if not value:
+ return None, None
+
+ date_entries = value.split("/")
+
+ if len(date_entries) != 2:
+ raise BadRequest(detail="Invalid range value")
+
+ try:
+ start_date = datetime.strptime(date_entries[0], cls.DATE_FORMAT)
+ end_date = datetime.strptime(date_entries[1], cls.DATE_FORMAT)
+ except ValueError:
+ raise BadRequest(detail="Invalid range value")
+
+ if start_date > end_date:
+ raise BadRequest(detail="Invalid range value")
+
+ start_date = handle_timezone(start_date, False)
+ end_date = handle_timezone(end_date, False)
+
+ return start_date, end_date
+
+
+class ModelFieldFilterMixin:
+ def filter_model_field(self, queryset, name, value):
+ if not value:
+ return queryset
+ lookup_kwargs = {f"{name}__in": value}
+ queryset = queryset.filter(**lookup_kwargs)
+ return queryset
+
+
+class ByTeamModelFieldFilterMixin:
+ FILTER_FIELD_NAME = "team"
+
+ def filter_model_field_with_single_value(self, queryset, name, value):
+ if not value:
+ return queryset
+ # ModelChoiceFilter
+ filter = self.filters[ByTeamModelFieldFilterMixin.FILTER_FIELD_NAME]
+ if filter.null_value == value:
+ lookup_kwargs = {f"{name}__isnull": True}
+ else:
+ lookup_kwargs = {f"{name}": value}
+ queryset = queryset.filter(**lookup_kwargs)
+ return queryset
+
+
+def get_team_queryset(request):
+ if request is None:
+ return Team.objects.none()
+
+ return request.user.organization.teams.all()
+
+
+class ByTeamFilter(ByTeamModelFieldFilterMixin, filters.FilterSet):
+ team_id = filters.ModelChoiceFilter(
+ field_name="team",
+ queryset=get_team_queryset,
+ to_field_name="public_primary_key",
+ null_label="noteam",
+ null_value="null",
+ method=ByTeamModelFieldFilterMixin.filter_model_field_with_single_value.__name__,
+ )
diff --git a/engine/common/api_helpers/mixins.py b/engine/common/api_helpers/mixins.py
new file mode 100644
index 0000000000..d121d2fdf7
--- /dev/null
+++ b/engine/common/api_helpers/mixins.py
@@ -0,0 +1,368 @@
+import json
+import math
+
+from django.core.exceptions import ObjectDoesNotExist
+from django.utils.functional import cached_property
+from jinja2.exceptions import TemplateRuntimeError
+from rest_framework import status
+from rest_framework.decorators import action
+from rest_framework.exceptions import NotFound, Throttled
+from rest_framework.response import Response
+
+from apps.alerts.incident_appearance.templaters import (
+ AlertEmailTemplater,
+ AlertPhoneCallTemplater,
+ AlertSlackTemplater,
+ AlertSmsTemplater,
+ AlertTelegramTemplater,
+ AlertWebTemplater,
+ TemplateLoader,
+)
+from apps.base.messaging import get_messaging_backends
+from apps.public_api.helpers import is_demo_token_request
+from common.api_helpers.exceptions import BadRequest
+from common.jinja_templater import apply_jinja_template
+
+
+class UpdateSerializerMixin:
+ serializer_class = None
+ update_serializer_class = None
+
+ def get_serializer_class(self):
+ if self.action in ["update", "partial_update"]:
+ return self.get_update_serializer_class()
+ return super().get_serializer_class()
+
+ def get_update_serializer_class(self):
+ assert self.update_serializer_class is not None, (
+ "'%s' should either include a `update_serializer_class` attribute,"
+ "or override the `get_update_serializer_class()` method." % self.__class__.__name__
+ )
+ return self.update_serializer_class
+
+
+# Use this mixin leftmost in the list of inherited serializer mixins
+class FilterSerializerMixin:
+ serializer_class = None
+ filter_serializer_class = None
+
+ def get_serializer_class(self):
+ is_filters_request = self.request.query_params.get("filters", "false") == "true"
+ if self.action in ["list"] and is_filters_request:
+ return self.get_filter_serializer_class()
+ else:
+ return super().get_serializer_class()
+
+ def get_filter_serializer_class(self):
+ assert self.filter_serializer_class is not None, (
+ "'%s' should either include a `filter_serializer_class` attribute,"
+ "or override the `get_update_serializer_class()` method." % self.__class__.__name__
+ )
+ return self.filter_serializer_class
+
+
+# Use this mixin leftmost in the list of inherited serializer mixins
+class ShortSerializerMixin:
+ serializer_class = None
+ short_serializer_class = None
+
+ def get_serializer_class(self):
+ is_short_request = self.request.query_params.get("short", "false") == "true"
+ if self.action in ["list"] and is_short_request:
+ return self.get_short_serializer_class()
+ else:
+ return super().get_serializer_class()
+
+ def get_short_serializer_class(self):
+ assert self.short_serializer_class is not None, (
+ "'%s' should either include a `short_serializer_class` attribute,"
+ "or override the `get_list_serializer_class()` method." % self.__class__.__name__
+ )
+ return self.short_serializer_class
+
+
+class CreateSerializerMixin:
+ serializer_class = None
+ create_serializer_class = None
+
+ def get_serializer_class(self):
+ if self.action in ["create", "destroy"]:
+ return self.get_create_serializer_class()
+ return super().get_serializer_class()
+
+ def get_create_serializer_class(self):
+ assert self.create_serializer_class is not None, (
+ "'%s' should either include a `create_serializer_class` attribute,"
+ "or override the `get_update_serializer_class()` method." % self.__class__.__name__
+ )
+ return self.create_serializer_class
+
+
+class ListSerializerMixin:
+ serializer_class = None
+ list_serializer_class = None
+
+ def get_serializer_class(self):
+ if self.action in ["retrieve", "list"]:
+ return self.get_list_serializer_class()
+ return super().get_serializer_class()
+
+ def get_list_serializer_class(self):
+ assert self.list_serializer_class is not None, (
+ "'%s' should either include a `list_serializer_class` attribute,"
+ "or override the `get_list_serializer_class()` method." % self.__class__.__name__
+ )
+ return self.list_serializer_class
+
+
+class EagerLoadingMixin:
+ @classmethod
+ def setup_eager_loading(cls, queryset):
+ if hasattr(cls, "SELECT_RELATED"):
+ queryset = queryset.select_related(*cls.SELECT_RELATED)
+ if hasattr(cls, "PREFETCH_RELATED"):
+ queryset = queryset.prefetch_related(*cls.PREFETCH_RELATED)
+ return queryset
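+
+    # A minimal usage sketch (serializer and relation names hypothetical): the
+    # serializer declares the relations, the view applies them to its queryset:
+    #
+    #     class AlertGroupSerializer(EagerLoadingMixin, serializers.ModelSerializer):
+    #         SELECT_RELATED = ("channel",)
+    #         PREFETCH_RELATED = ("alerts",)
+    #
+    #     queryset = AlertGroupSerializer.setup_eager_loading(AlertGroup.objects.all())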
+
+
+class DemoTokenMixin:
+ """
+    View mixin for public API requests authorized with a demo token.
+ """
+
+ def dispatch(self, request, *args, **kwargs):
+ """
+ Overridden dispatch method of APIView
+ https://github.com/encode/django-rest-framework/blob/master/rest_framework/views.py#L485
+ """
+ method = request.method.lower()
+
+ if is_demo_token_request(request) and method in ["post", "put", "delete"]:
+ self.args = args
+ self.kwargs = kwargs
+ request = self.initialize_request(request, *args, **kwargs)
+ self.request = request
+
+ # there is a strange comment about this
+ # https://github.com/encode/django-rest-framework/blob/master/rest_framework/views.py#L494
+ self.headers = self.default_response_headers
+
+ try:
+ self.initial(request, *args, **kwargs)
+
+ """
+ check for allowed request methods
+
+ from APIView:
+ If `request.method` does not correspond to a handler method,
+ determine what kind of exception to raise.
+
+ def http_method_not_allowed(self, request, *args, **kwargs):
+ raise exceptions.MethodNotAllowed(request.method)
+ """
+
+ if method in self.http_method_names:
+ handler = getattr(self, method, self.http_method_not_allowed)
+ else:
+ handler = self.http_method_not_allowed
+
+ # function comparison explanation
+ # https://stackoverflow.com/a/18217024
+ if handler == self.http_method_not_allowed:
+ response = handler(request, *args, **kwargs)
+
+ elif method == "post":
+                    # Skip real instance creation and return the instance
+                    # whose public primary key equals demo_default_id.
+ instance = self.model._default_manager.get(public_primary_key=self.demo_default_id)
+ serializer = self.get_serializer(instance)
+ headers = self.get_success_headers(serializer.data)
+ response = Response(data=serializer.data, status=status.HTTP_201_CREATED, headers=headers)
+
+ elif method == "put":
+                    # Skip the real instance update and return
+                    # the requested instance unchanged.
+ instance = self.get_object()
+ serializer = self.get_serializer(instance)
+ headers = self.get_success_headers(serializer.data)
+ response = Response(data=serializer.data, status=status.HTTP_200_OK, headers=headers)
+
+ elif method == "delete":
+                    # In this case we return nothing, just a success response.
+ response = Response(status=status.HTTP_204_NO_CONTENT)
+
+ except Exception as exc:
+ response = self.handle_exception(exc)
+
+ self.response = self.finalize_response(request, response, *args, **kwargs)
+ return self.response
+
+ return super().dispatch(request, *args, **kwargs)
+
+
+class RateLimitHeadersMixin:
+    # This mixin adds the RateLimit-Reset header to rate-limited responses
+ def handle_exception(self, exc):
+ if isinstance(exc, Throttled):
+ if exc.wait is not None:
+ wait = f"{math.ceil(exc.wait)}"
+ else:
+                # If wait is None, use the maximum wait delay.
+                # This case can be reproduced by decreasing the rate limit while self.history is not empty.
+ wait = f"{350}"
+ self.headers["RateLimit-Reset"] = wait
+ return super().handle_exception(exc)
+
+
+class OrderedModelSerializerMixin:
+ def _change_position(self, order, instance):
+ if order is not None:
+ if order >= 0:
+ instance.to(order)
+ elif order == -1:
+ instance.bottom()
+ else:
+ raise BadRequest(detail="Invalid value for position field")
+
+ def _validate_order(self, order, filter_kwargs):
+ if order is not None and (self.instance is None or self.instance.order != order):
+ last_instance = self.Meta.model.objects.filter(**filter_kwargs).order_by("order").last()
+ max_order = last_instance.order if last_instance else -1
+ if self.instance is None:
+ max_order += 1
+ if order > max_order:
+ raise BadRequest(detail="Invalid value for position field")
+
+
+class PublicPrimaryKeyMixin:
+ def get_object(self):
+ pk = self.kwargs["pk"]
+ queryset = self.filter_queryset(self.get_queryset())
+
+ try:
+ obj = queryset.get(public_primary_key=pk)
+ except ObjectDoesNotExist:
+ raise NotFound
+
+ # May raise a permission denied
+ self.check_object_permissions(self.request, obj)
+
+ return obj
+
+
+# TODO: move to separate file
+SLACK = "slack"
+WEB = "web"
+PHONE_CALL = "phone_call"
+SMS = "sms"
+EMAIL = "email"
+TELEGRAM = "telegram"
+NOTIFICATION_CHANNEL_OPTIONS = [SLACK, WEB, PHONE_CALL, SMS, EMAIL, TELEGRAM]
+TITLE = "title"
+MESSAGE = "message"
+IMAGE_URL = "image_url"
+RESOLVE_CONDITION = "resolve_condition"
+ACKNOWLEDGE_CONDITION = "acknowledge_condition"
+GROUPING_ID = "grouping_id"
+SOURCE_LINK = "source_link"
+TEMPLATE_NAME_OPTIONS = [TITLE, MESSAGE, IMAGE_URL, RESOLVE_CONDITION, ACKNOWLEDGE_CONDITION, GROUPING_ID, SOURCE_LINK]
+NOTIFICATION_CHANNEL_TO_TEMPLATER_MAP = {
+ SLACK: AlertSlackTemplater,
+ WEB: AlertWebTemplater,
+ PHONE_CALL: AlertPhoneCallTemplater,
+ SMS: AlertSmsTemplater,
+ EMAIL: AlertEmailTemplater,
+ TELEGRAM: AlertTelegramTemplater,
+}
+
+# add additionally supported messaging backends
+for backend_id, backend in get_messaging_backends():
+ if backend.templater is not None:
+ backend_slug = backend_id.lower()
+ NOTIFICATION_CHANNEL_OPTIONS.append(backend_slug)
+ NOTIFICATION_CHANNEL_TO_TEMPLATER_MAP[backend_slug] = backend.get_templater_class()
+
+TEMPLATE_NAMES_ONLY_WITH_NOTIFICATION_CHANNEL = [TITLE, MESSAGE, IMAGE_URL]
+TEMPLATE_NAMES_WITHOUT_NOTIFICATION_CHANNEL = [RESOLVE_CONDITION, ACKNOWLEDGE_CONDITION, GROUPING_ID, SOURCE_LINK]
+
+
+class PreviewTemplateMixin:
+ @action(methods=["post"], detail=True)
+ def preview_template(self, request, pk):
+ template_body = request.data.get("template_body", None)
+ template_name = request.data.get("template_name", None)
+
+ if template_body is None or template_name is None:
+ response = {"preview": None}
+ return Response(response, status=status.HTTP_200_OK)
+
+ notification_channel, attr_name = self.parse_name_and_notification_channel(template_name)
+ if attr_name is None:
+ raise BadRequest(detail={"template_name": "Attr name is required"})
+ if attr_name not in TEMPLATE_NAME_OPTIONS:
+ raise BadRequest(detail={"template_name": "Unknown attr name"})
+ if attr_name in TEMPLATE_NAMES_ONLY_WITH_NOTIFICATION_CHANNEL:
+ if notification_channel is None:
+ raise BadRequest(detail={"notification_channel": "notification_channel is required"})
+ if notification_channel not in NOTIFICATION_CHANNEL_OPTIONS:
+ raise BadRequest(detail={"notification_channel": "Unknown notification_channel"})
+
+ alert_to_template = self.get_alert_to_template()
+ if alert_to_template is None:
+ raise BadRequest(detail="Alert to preview does not exist")
+
+ if attr_name in TEMPLATE_NAMES_ONLY_WITH_NOTIFICATION_CHANNEL:
+
+ class PreviewTemplateLoader(TemplateLoader):
+ def get_attr_template(self, attr, alert_receive_channel, render_for=None):
+ if attr == attr_name and render_for == notification_channel:
+ return template_body
+ else:
+ return super().get_attr_template(attr, alert_receive_channel, render_for)
+
+ templater_cls = NOTIFICATION_CHANNEL_TO_TEMPLATER_MAP[notification_channel]
+ templater = templater_cls(alert_to_template)
+ templater.template_manager = PreviewTemplateLoader()
+ try:
+ templated_alert = templater.render()
+ except TemplateRuntimeError:
+ raise BadRequest(detail={"template_body": "Invalid template syntax"})
+
+ templated_attr = getattr(templated_alert, attr_name)
+
+ elif attr_name in TEMPLATE_NAMES_WITHOUT_NOTIFICATION_CHANNEL:
+ templated_attr, _ = apply_jinja_template(template_body, payload=alert_to_template.raw_request_data)
+ else:
+ templated_attr = None
+ response = {"preview": templated_attr}
+ return Response(response, status=status.HTTP_200_OK)
+
+ def get_alert_to_template(self):
+ raise NotImplementedError
+
+ @staticmethod
+ def parse_name_and_notification_channel(template_param):
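+        # Maps a template query param to (notification_channel, attr_name), e.g.
+        #     "slack_title_template"  -> ("slack", "title")
+        #     "grouping_id_template"  -> (None, "grouping_id")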
+ template_param = template_param.replace("_template", "")
+ attr_name = None
+ destination = None
+ if template_param.startswith(tuple(TEMPLATE_NAMES_WITHOUT_NOTIFICATION_CHANNEL)):
+ attr_name = template_param
+ elif template_param.startswith(tuple(NOTIFICATION_CHANNEL_OPTIONS)):
+ for notification_channel in NOTIFICATION_CHANNEL_OPTIONS:
+ if template_param.startswith(notification_channel):
+ destination = notification_channel
+ attr_name = template_param[len(destination) + 1 :]
+ break
+ return destination, attr_name
+
+
+class GrafanaHeadersMixin:
+ @cached_property
+ def grafana_context(self) -> dict:
+ return json.loads(self.request.headers.get("X-Grafana-Context"))
+
+ @cached_property
+ def instance_context(self) -> dict:
+ return json.loads(self.request.headers["X-Instance-Context"])
diff --git a/engine/common/api_helpers/optional_slash_router.py b/engine/common/api_helpers/optional_slash_router.py
new file mode 100644
index 0000000000..86582f3a00
--- /dev/null
+++ b/engine/common/api_helpers/optional_slash_router.py
@@ -0,0 +1,21 @@
+from typing import Optional
+
+from django.urls import URLPattern, re_path
+from django.views import View
+from rest_framework import routers
+
+
+class OptionalSlashRouter(routers.SimpleRouter):
+ """
+    A router that makes the trailing slash optional:
+    OptionalSlashRouter().register("users", ...) will match both "users" and "users/"
+ """
+
+ def __init__(self):
+ super().__init__()
+ self.trailing_slash = "/?"
+
+
+def optional_slash_path(route: str, view: View, name: Optional[str] = None) -> URLPattern:
+ regex_route = "^{}/?$".format(route)
+ return re_path(route=regex_route, view=view, name=name)
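+
+
+# e.g. optional_slash_path("features", FeaturesAPIView.as_view(), name="features")
+# (a hypothetical view) matches both "features" and "features/".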
diff --git a/engine/common/api_helpers/paginators.py b/engine/common/api_helpers/paginators.py
new file mode 100644
index 0000000000..023f2294c3
--- /dev/null
+++ b/engine/common/api_helpers/paginators.py
@@ -0,0 +1,13 @@
+from rest_framework.pagination import PageNumberPagination
+
+
+class HundredPageSizePaginator(PageNumberPagination):
+ page_size = 100
+
+
+class FiftyPageSizePaginator(PageNumberPagination):
+ page_size = 50
+
+
+class TwentyFivePageSizePaginator(PageNumberPagination):
+ page_size = 25
diff --git a/engine/common/api_helpers/utils.py b/engine/common/api_helpers/utils.py
new file mode 100644
index 0000000000..1f8ecc547b
--- /dev/null
+++ b/engine/common/api_helpers/utils.py
@@ -0,0 +1,52 @@
+import requests
+from django.conf import settings
+from icalendar import Calendar
+from rest_framework import serializers
+
+
+class CurrentOrganizationDefault:
+ """
+ Utility class to get the current organization right from the serializer field.
+    Paired with serializers.HiddenField, it allows creating objects
+    without overriding perform_create on the view, while respecting unique_together constraints.
+ Example: organization = serializers.HiddenField(default=CurrentOrganizationDefault())
+ """
+
+ def set_context(self, serializer_field):
+ self.organization = serializer_field.context["request"].auth.organization
+
+ def __call__(self):
+ return self.organization
+
+ def __repr__(self):
+ return "%s()" % self.__class__.__name__
+
+
+class CurrentTeamDefault:
+ """
+ Utility class to get the current team right from the serializer field.
+ """
+
+ def set_context(self, serializer_field):
+ self.team = serializer_field.context["request"].user.current_team
+
+ def __call__(self):
+ return self.team
+
+ def __repr__(self):
+ return "%s()" % self.__class__.__name__
+
+
+def validate_ical_url(url):
+ if url:
+ if settings.BASE_URL in url:
+ raise serializers.ValidationError("Potential self-reference")
+ try:
+ ical_file = requests.get(url).text
+ Calendar.from_ical(ical_file)
+ except requests.exceptions.RequestException:
+ raise serializers.ValidationError("Ical download failed")
+ except ValueError:
+ raise serializers.ValidationError("Ical parse failed")
+ return url
+ return None
diff --git a/engine/common/constants/__init__.py b/engine/common/constants/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/engine/common/constants/role.py b/engine/common/constants/role.py
new file mode 100644
index 0000000000..69a05d044e
--- /dev/null
+++ b/engine/common/constants/role.py
@@ -0,0 +1,11 @@
+from enum import IntEnum
+
+
+class Role(IntEnum):
+ ADMIN = 0
+ EDITOR = 1
+ VIEWER = 2
+
+ @classmethod
+ def choices(cls):
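+        # e.g. Role.choices() == ((0, "ADMIN"), (1, "EDITOR"), (2, "VIEWER"))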
+ return tuple((option.value, option.name) for option in cls)
diff --git a/engine/common/constants/slack_auth.py b/engine/common/constants/slack_auth.py
new file mode 100644
index 0000000000..12a8b319c2
--- /dev/null
+++ b/engine/common/constants/slack_auth.py
@@ -0,0 +1,5 @@
+REDIRECT_AFTER_SLACK_INSTALL = "redirect_after_slack_install"
+# slack errors flags
+SLACK_AUTH_WRONG_WORKSPACE_ERROR = "wrong_workspace"
+SLACK_AUTH_SLACK_USER_ALREADY_CONNECTED_ERROR = "user_already_connected"
+SLACK_AUTH_FAILED = "auth_failed"
diff --git a/engine/common/custom_celery_tasks/__init__.py b/engine/common/custom_celery_tasks/__init__.py
new file mode 100644
index 0000000000..aa45df5bf2
--- /dev/null
+++ b/engine/common/custom_celery_tasks/__init__.py
@@ -0,0 +1 @@
+from .dedicated_queue_retry_task import shared_dedicated_queue_retry_task # noqa
diff --git a/engine/common/custom_celery_tasks/create_alert_base_task.py b/engine/common/custom_celery_tasks/create_alert_base_task.py
new file mode 100644
index 0000000000..2368a62a36
--- /dev/null
+++ b/engine/common/custom_celery_tasks/create_alert_base_task.py
@@ -0,0 +1,8 @@
+from abc import ABC
+
+from common.custom_celery_tasks.dedicated_queue_retry_task import DedicatedQueueRetryTask
+from common.custom_celery_tasks.safe_to_broker_outage_task import SafeToBrokerOutageTask
+
+
+class CreateAlertBaseTask(SafeToBrokerOutageTask, DedicatedQueueRetryTask, ABC):
+ pass
diff --git a/engine/common/custom_celery_tasks/dedicated_queue_retry_task.py b/engine/common/custom_celery_tasks/dedicated_queue_retry_task.py
new file mode 100644
index 0000000000..0c385a8d65
--- /dev/null
+++ b/engine/common/custom_celery_tasks/dedicated_queue_retry_task.py
@@ -0,0 +1,30 @@
+from celery import Task, shared_task
+
+RETRY_QUEUE = "retry"
+
+
+class DedicatedQueueRetryTask(Task):
+ """
+    Custom task class that sends all retried tasks to a dedicated retry queue.
+    This is needed to avoid overloading the regular (high, medium, low) queues with retried tasks.
+ """
+
+ def retry(
+ self, args=None, kwargs=None, exc=None, throw=True, eta=None, countdown=None, max_retries=None, **options
+ ):
+ # Just call retry with queue argument
+ return super().retry(
+ args=args,
+ kwargs=kwargs,
+ exc=exc,
+ throw=throw,
+ eta=eta,
+ countdown=countdown,
+ max_retries=max_retries,
+ queue=RETRY_QUEUE,
+ **options,
+ )
+
+
+def shared_dedicated_queue_retry_task(*args, **kwargs):
+ return shared_task(*args, base=DedicatedQueueRetryTask, **kwargs)
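+
+
+# A minimal sketch of the intended usage (task body hypothetical); any retry of
+# the task is routed to the dedicated "retry" queue instead of its original queue:
+#
+#     @shared_dedicated_queue_retry_task(autoretry_for=(Exception,), max_retries=3)
+#     def perform_notification(log_record_pk):
+#         ...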
diff --git a/engine/common/custom_celery_tasks/safe_to_broker_outage_task.py b/engine/common/custom_celery_tasks/safe_to_broker_outage_task.py
new file mode 100644
index 0000000000..019dd06743
--- /dev/null
+++ b/engine/common/custom_celery_tasks/safe_to_broker_outage_task.py
@@ -0,0 +1,21 @@
+from abc import ABC
+
+from celery import Task
+from kombu.exceptions import OperationalError
+
+from apps.base.models import FailedToInvokeCeleryTask
+
+
+class SafeToBrokerOutageTask(Task, ABC):
+ """
+    Dumps the task name and parameters to the database when the broker is not available.
+ """
+
+ def apply_async(
+ self, args=None, kwargs=None, task_id=None, producer=None, link=None, link_error=None, shadow=None, **options
+ ):
+ try:
+ return super().apply_async(args, kwargs, task_id, producer, link, link_error, shadow, **options)
+ except OperationalError:
+ parameters = {"args": args, "kwargs": kwargs, "options": options}
+ FailedToInvokeCeleryTask.objects.create(name=self.name, parameters=parameters)
diff --git a/engine/common/exceptions/__init__.py b/engine/common/exceptions/__init__.py
new file mode 100644
index 0000000000..d191b8f426
--- /dev/null
+++ b/engine/common/exceptions/__init__.py
@@ -0,0 +1 @@
+from .exceptions import MaintenanceCouldNotBeStartedError, TeamCanNotBeChangedError, UnableToSendDemoAlert # noqa: F401
diff --git a/engine/common/exceptions/exceptions.py b/engine/common/exceptions/exceptions.py
new file mode 100644
index 0000000000..69318bd553
--- /dev/null
+++ b/engine/common/exceptions/exceptions.py
@@ -0,0 +1,19 @@
+class OperationCouldNotBePerformedError(Exception):
+ """
+    Indicates that an operation could not be performed due to application logic,
+    e.g. you can't ack a resolved AlertGroup.
+ """
+
+ pass
+
+
+class MaintenanceCouldNotBeStartedError(OperationCouldNotBePerformedError):
+ pass
+
+
+class TeamCanNotBeChangedError(OperationCouldNotBePerformedError):
+ pass
+
+
+class UnableToSendDemoAlert(OperationCouldNotBePerformedError):
+ pass
diff --git a/engine/common/jinja_templater/__init__.py b/engine/common/jinja_templater/__init__.py
new file mode 100644
index 0000000000..9570e031e5
--- /dev/null
+++ b/engine/common/jinja_templater/__init__.py
@@ -0,0 +1,2 @@
+from .apply_jinja_template import apply_jinja_template # noqa: F401
+from .jinja_template_env import jinja_template_env # noqa: F401
diff --git a/engine/common/jinja_templater/apply_jinja_template.py b/engine/common/jinja_templater/apply_jinja_template.py
new file mode 100644
index 0000000000..fb00ab3108
--- /dev/null
+++ b/engine/common/jinja_templater/apply_jinja_template.py
@@ -0,0 +1,12 @@
+from jinja2 import TemplateSyntaxError, UndefinedError
+
+from .jinja_template_env import jinja_template_env
+
+
+def apply_jinja_template(template, payload=None, **kwargs):
+ try:
+ template = jinja_template_env.from_string(template)
+ result = template.render(payload=payload, **kwargs)
+ return result, True
+ except (UndefinedError, TypeError, ValueError, KeyError, TemplateSyntaxError):
+ return None, False
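+
+
+# For example (the payload is a hypothetical alert body):
+#
+#     result, ok = apply_jinja_template("{{ payload.title | upper }}", {"title": "db down"})
+#     # -> ("DB DOWN", True); any template error yields (None, False)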
diff --git a/engine/common/jinja_templater/filters.py b/engine/common/jinja_templater/filters.py
new file mode 100644
index 0000000000..52c59f0713
--- /dev/null
+++ b/engine/common/jinja_templater/filters.py
@@ -0,0 +1,24 @@
+import json
+
+from django.utils.dateparse import parse_datetime
+
+
+def datetimeformat(value, format="%H:%M / %d-%m-%Y"):
+ try:
+ return value.strftime(format)
+ except AttributeError:
+ return None
+
+
+def iso8601_to_time(value):
+ try:
+ return parse_datetime(value)
+ except (ValueError, AttributeError, TypeError):
+ return None
+
+
+def to_pretty_json(value):
+ try:
+ return json.dumps(value, sort_keys=True, indent=4, separators=(",", ": "), ensure_ascii=False)
+ except (ValueError, AttributeError, TypeError):
+ return None
diff --git a/engine/common/jinja_templater/jinja_template_env.py b/engine/common/jinja_templater/jinja_template_env.py
new file mode 100644
index 0000000000..f151eac6b2
--- /dev/null
+++ b/engine/common/jinja_templater/jinja_template_env.py
@@ -0,0 +1,12 @@
+from django.utils import timezone
+from jinja2 import BaseLoader
+from jinja2.sandbox import SandboxedEnvironment
+
+from .filters import datetimeformat, iso8601_to_time, to_pretty_json
+
+jinja_template_env = SandboxedEnvironment(loader=BaseLoader())
+
+jinja_template_env.filters["datetimeformat"] = datetimeformat
+jinja_template_env.filters["iso8601_to_time"] = iso8601_to_time
+jinja_template_env.filters["tojson_pretty"] = to_pretty_json
+jinja_template_env.globals["time"] = timezone.now
diff --git a/engine/common/mixins/use_random_readonly_db_manager_mixin.py b/engine/common/mixins/use_random_readonly_db_manager_mixin.py
new file mode 100644
index 0000000000..46559aa414
--- /dev/null
+++ b/engine/common/mixins/use_random_readonly_db_manager_mixin.py
@@ -0,0 +1,21 @@
+import random
+
+from django.conf import settings
+
+
+class UseRandomReadonlyDbManagerMixin:
+ """
+    Use this mixin in model managers when you want queries to run against a random read-only replica
+ """
+
+ @property
+ def using_readonly_db(self):
+ """Select one of the readonly databases this QuerySet should execute against."""
+ if hasattr(settings, "READONLY_DATABASES") and len(settings.READONLY_DATABASES) > 0:
+ using_db = random.choice(list(settings.READONLY_DATABASES.keys()))
+ return self.using(using_db)
+ else:
+ # Use "default" database
+ # Django uses the database with the alias of default when no other database has been selected.
+ # https://docs.djangoproject.com/en/3.2/topics/db/multi-db/#defining-your-databases
+ return self.using("default")
diff --git a/engine/common/public_primary_keys.py b/engine/common/public_primary_keys.py
new file mode 100644
index 0000000000..07a8ca13b5
--- /dev/null
+++ b/engine/common/public_primary_keys.py
@@ -0,0 +1,66 @@
+import logging
+
+from django.conf import settings
+from django.core.exceptions import FieldError
+from django.utils.crypto import get_random_string
+
+logger = logging.getLogger(__name__)
+
+
+def generate_public_primary_key(prefix, length=settings.PUBLIC_PRIMARY_KEY_MIN_LENGTH):
+ """It generates random string with prefix and length
+ :param prefix:
+ "U": ("user_management", "User"),
+ "O": ("user_management", "Organization"),
+ "T": ("user_management", "Team"),
+ "N": ("base", "UserNotificationPolicy"),
+ "C": ("alerts", "AlertReceiveChannel"),
+ "R": ("alerts", "ChannelFilter"),
+ "S": ("schedules", "OnCallSchedule"),
+ "E": ("alerts", "EscalationPolicy"),
+ "F": ("alerts", "EscalationChain"),
+ "I": ("alerts", "AlertGroup"),
+ "A": ("alerts", "Alert"),
+ "M": ("alerts", "ResolutionNote"),
+ "G": ("slack", "SlackUserGroup"),
+ "K": ("alerts", "CustomButton"),
+ "O": ("schedules", "CustomOnCallShift"),
+ "B": ("heartbeat", "IntegrationHeartBeat"),
+ "H": ("slack", "SlackChannel"),
+ "Z": ("telegram", "TelegramToOrganizationConnector"),
+ "L": ("base", "LiveSetting"),
+ "V": ("base", "OrganizationLogRecord"),
+ "X": ("extensions", "Other models from extensions apps"),
+ :param length:
+ :return:
+ """
+
+ return prefix + get_random_string(length=length, allowed_chars=settings.PUBLIC_PRIMARY_KEY_ALLOWED_CHARS)
+
+
+def increase_public_primary_key_length(failure_counter, prefix, model_name, max_attempt_count=5):
+ """
+    Yet another helper, which generates a random string with a larger length
+    when the previously generated public_primary_key already exists.
+
+ :param failure_counter:
+ :param prefix:
+ :param model_name:
+    :param max_attempt_count: when the attempt count exceeds max_attempt_count, an exception is raised
+ :return:
+ """
+
+ if failure_counter < max_attempt_count:
+ logger.warning(
+ f"Let's try increase a {model_name} "
+ f"new_public_primary_key length "
+ f"({failure_counter + 1}/{max_attempt_count}) times"
+ )
+
+ return generate_public_primary_key(
+ prefix=prefix, length=settings.PUBLIC_PRIMARY_KEY_MIN_LENGTH + failure_counter
+ )
+ else:
+ raise FieldError(
+ f"A count of {model_name} new_public_primary_key generation " f"attempts is more than {max_attempt_count}!"
+ )
diff --git a/engine/common/tests/__init__.py b/engine/common/tests/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/engine/common/tests/test_clean_markup.py b/engine/common/tests/test_clean_markup.py
new file mode 100644
index 0000000000..ec468cecdb
--- /dev/null
+++ b/engine/common/tests/test_clean_markup.py
@@ -0,0 +1,44 @@
+from common.utils import clean_markup
+
+
+def test_clean_code_blocks_name():
+ original = "Tada! ```Tadada!``` `Tadadada!`"
+ expected = "Tada! Tadada! Tadadada!"
+ assert clean_markup(original) == expected
+
+
+def test_clean_visual_basics():
+ original = "~Stroke~ *Bold* _Italic_ ~Word ~"
+ expected = "Stroke Bold Italic ~Word ~"
+ assert clean_markup(original) == expected
+
+
+def test_clean_block_quotes():
+ original = (
+ "This is unquoted text\n"
+ "> This is quoted text\n"
+ "> This is still quoted text\n"
+ "This is unquoted text again"
+ )
+ expected = (
+ "This is unquoted text\n"
+ "> This is quoted text\n"
+ "> This is still quoted text\n"
+ "This is unquoted text again"
+ )
+
+ assert clean_markup(original) == expected
+
+
+def test_clean_link():
+ original = ""
+ expected = "http://www.foo.com"
+ assert clean_markup(original) == expected
+
+
+def test_clean_mailto():
+    # email; the angle-bracket literal was stripped from the diff; a mailto autolink is assumed
+    original = "<mailto:bob@example.com>"
+ expected = "bob@example.com"
+
+ assert clean_markup(original) == expected
diff --git a/engine/common/tests/test_urlize.py b/engine/common/tests/test_urlize.py
new file mode 100644
index 0000000000..e239638dc1
--- /dev/null
+++ b/engine/common/tests/test_urlize.py
@@ -0,0 +1,25 @@
+from common.utils import urlize_with_respect_to_a
+
+
+def test_urlize_will_not_mutate_text_without_links():
+ original = "Text without link"
+ expected = original
+ assert urlize_with_respect_to_a(original) == expected
+
+
+def test_urlize_will_not_mutate_text_with_link_in_a():
+    # the anchor markup was stripped from the diff; reconstructed as a plain <a> tag
+    original = '<a href="https://amixr.io/">amixr website</a>'
+ expected = original
+ assert urlize_with_respect_to_a(original) == expected
+
+
+def test_urlize_will_wrap_link():
+ original = "https://amixr.io/"
+    # the anchor markup was stripped from the diff; django's urlize() output is assumed
+    expected = '<a href="https://amixr.io/">https://amixr.io/</a>'
+ assert urlize_with_respect_to_a(original) == expected
+
+
+def test_urlize_will_not_wrap_link_inside_a():
+    # the anchor markup was stripped from the diff; reconstructed as a plain <a> tag
+    original = '<a href="https://amixr.io/">https://amixr.io/</a>'
+ expected = original
+ assert urlize_with_respect_to_a(original) == expected
diff --git a/engine/common/utils.py b/engine/common/utils.py
new file mode 100644
index 0000000000..4b9ef9c126
--- /dev/null
+++ b/engine/common/utils.py
@@ -0,0 +1,228 @@
+import functools
+import html
+import os
+import random
+import re
+import time
+from functools import reduce
+
+import factory
+import markdown2
+from bs4 import BeautifulSoup
+from celery.utils.log import get_task_logger
+from celery.utils.time import get_exponential_backoff_interval
+from django.utils.html import urlize
+
+logger = get_task_logger(__name__)
+
+
+# Faker that always returns unique values
+class UniqueFaker(factory.Faker):
+ @classmethod
+ def _get_faker(cls, locale=None):
+ return super()._get_faker(locale).unique
+
+
+# Context manager for tasks that are intended to retry.
+# It reruns the whole task if one of the exceptions in `exc` is raised.
+class OkToRetry:
+ def __init__(self, task, exc, num_retries=None, compute_countdown=None, allow_jitter=True):
+ self.task = task
+ self.num_retries = num_retries
+ self.compute_countdown = compute_countdown
+ self.allow_jitter = allow_jitter
+
+ if not isinstance(exc, (list, tuple)):
+ exc = [exc]
+ self.exc = exc
+
+ def __enter__(self):
+ pass
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ if exc_type is not None and any(issubclass(exc_type, exc) for exc in self.exc):
+ if self.num_retries is None or self.task.request.retries + 1 <= self.num_retries:
+ countdown = self.get_countdown(exc_val)
+
+ logger.warning(
+ f"Retrying task gracefully in {countdown} seconds due to {exc_type.__name__}. "
+ f"args: {self.task.request.args}, kwargs: {self.task.request.kwargs}"
+ )
+ self.rerun_task(countdown)
+
+ return True
+
+ def get_countdown(self, exc_val):
+ if self.compute_countdown is not None:
+ countdown = self.compute_countdown(exc_val)
+ if self.allow_jitter is True:
+ countdown = countdown + random.uniform(0, 2)
+ else:
+ countdown = get_exponential_backoff_interval(
+ factor=self.task.retry_backoff, retries=self.task.request.retries, maximum=600, full_jitter=True
+ )
+ return countdown
+
+ def rerun_task(self, countdown):
+ self.task.apply_async(
+ self.task.request.args,
+ kwargs=self.task.request.kwargs,
+ retries=self.task.request.retries + 1,
+ countdown=countdown,
+ )
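+
+    # A minimal usage sketch inside a celery task (exception and function names
+    # hypothetical): if the error is raised, the task is re-queued with a countdown.
+    #
+    #     with OkToRetry(task=perform_notification, exc=SlackAPIRateLimitError, num_retries=5):
+    #         post_slack_message(...)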
+
+
+# lru_cache variant with a timeout, so that stale values don't occupy memory for too long
+def timed_lru_cache(timeout: int, maxsize: int = 128, typed: bool = False):
+ def wrapper_cache(func):
+ func = functools.lru_cache(maxsize=maxsize, typed=typed)(func)
+ func.delta = timeout * 10**9
+ func.expiration = time.monotonic_ns() + func.delta
+
+ @functools.wraps(func)
+ def wrapped_func(*args, **kwargs):
+ if time.monotonic_ns() >= func.expiration:
+ func.cache_clear()
+ func.expiration = time.monotonic_ns() + func.delta
+ return func(*args, **kwargs)
+
+ wrapped_func.cache_info = func.cache_info
+ wrapped_func.cache_clear = func.cache_clear
+ return wrapped_func
+
+ return wrapper_cache
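+
+
+# Hypothetical usage: cache instance-info lookups for at most 60 seconds:
+#
+#     @timed_lru_cache(timeout=60, maxsize=32)
+#     def get_instance_info(stack_id):
+#         ...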
+
+
+def getenv_boolean(variable_name: str, default: bool) -> bool:
+ value = os.environ.get(variable_name)
+ if value is None:
+ return default
+
+ return value.lower() in ("true", "1")
+
+
+def batch_queryset(qs, batch_size=1000):
+ qs_count = qs.count()
+ for start in range(0, qs_count, batch_size):
+ end = min(start + batch_size, qs_count)
+ yield qs[start:end]
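+
+
+# e.g. iterate a large table in chunks of 1000 (model name hypothetical):
+#
+#     for batch in batch_queryset(Alert.objects.all(), batch_size=1000):
+#         process(batch)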
+
+
+def is_regex_valid(regex) -> bool:
+ try:
+ re.compile(regex)
+ return True
+ except re.error:
+ return False
+
+
+def isoformat_with_tz_suffix(value):
+ """
+    Python's default datetime.isoformat() returns a tz offset like +00:00 instead of the military tz suffix (e.g. Z for UTC).
+    DRF, on the other hand, returns datetimes with the military tz suffix.
+    This utility function exists to return consistent datetime strings in the api.
+    It is copied from DRF DateTimeField.to_representation
+ """
+ value = value.isoformat()
+ if value.endswith("+00:00"):
+ value = value[:-6] + "Z"
+ return value
+
+
+def is_string_with_visible_characters(string):
+ return type(string) == str and not string.isspace() and not string == ""
+
+
+def str_or_backup(string, backup):
+ return string if is_string_with_visible_characters(string) else backup
+
+
+def clean_html(text):
+ text = "".join(BeautifulSoup(text, features="html.parser").find_all(text=True))
+ return text
+
+
+def convert_slack_md_to_html(text):
+ text = re.sub(r"\*", "**", text)
+ return convert_md_to_html(text)
+
+
+def convert_md_to_html(text):
+ text = markdown2.markdown(
+ text,
+ extras=[
+ "cuddled-lists",
+ "code-friendly", # Disable _ and __ for em and strong.
+            # This gives us <pre> and <code> tags for ```-fenced blocks
+ "fenced-code-blocks",
+ "pyshell",
+ ],
+ ).strip()
+    # Special handling cases for lists and newlines
+    text = text.replace("\n\n", "")
+    text = text.replace("\n", " ")
+ return text
+
+
+def clean_markup(text):
+ html = markdown2.markdown(text, extras=["cuddled-lists", "fenced-code-blocks", "pyshell"]).strip()
+ cleaned = clean_html(html)
+ stroke_matches = re.findall(r"~\w+~", cleaned)
+ for stroke_match in stroke_matches:
+ cleaned_match = stroke_match.strip("~")
+ cleaned = cleaned.replace(stroke_match, cleaned_match)
+ return cleaned
+
+
+def escape_html(text):
+ return html.escape(text)
+
+
+def urlize_with_respect_to_a(html):
+ """
+    Wrap links into an <a> tag if not already wrapped
+ """
+ soup = BeautifulSoup(html, features="html.parser")
+ textNodes = soup.find_all(text=True)
+ for textNode in textNodes:
+ if textNode.parent and getattr(textNode.parent, "name") == "a":
+ continue
+ urlizedText = urlize(textNode)
+ textNode.replaceWith(BeautifulSoup(urlizedText, features="html.parser"))
+
+ return str(soup)
+
+
+url_re = re.compile(
+    r"""(?i)\b((?:https?:(?:/{1,3}|[a-z0-9%])|[a-z0-9.\-]+[.](?:com|net|org|edu|gov|mil|aero|asia|biz|cat|coop|info|int|jobs|mobi|museum|name|post|pro|tel|travel|xxx|ac|ad|ae|af|ag|ai|al|am|an|ao|aq|ar|as|at|au|aw|ax|az|ba|bb|bd|be|bf|bg|bh|bi|bj|bm|bn|bo|br|bs|bt|bv|bw|by|bz|ca|cc|cd|cf|cg|ch|ci|ck|cl|cm|cn|co|cr|cs|cu|cv|cx|cy|cz|dd|de|dj|dk|dm|do|dz|ec|ee|eg|eh|er|es|et|eu|fi|fj|fk|fm|fo|fr|ga|gb|gd|ge|gf|gg|gh|gi|gl|gm|gn|gp|gq|gr|gs|gt|gu|gw|gy|hk|hm|hn|hr|ht|hu|id|ie|il|im|in|io|iq|ir|is|it|je|jm|jo|jp|ke|kg|kh|ki|km|kn|kp|kr|kw|ky|kz|la|lb|lc|li|lk|lr|ls|lt|lu|lv|ly|ma|mc|md|me|mg|mh|mk|ml|mm|mn|mo|mp|mq|mr|ms|mt|mu|mv|mw|mx|my|mz|na|nc|ne|nf|ng|ni|nl|no|np|nr|nu|nz|om|pa|pe|pf|pg|ph|pk|pl|pm|pn|pr|ps|pt|pw|py|qa|re|ro|rs|ru|rw|sa|sb|sc|sd|se|sg|sh|si|sj|Ja|sk|sl|sm|sn|so|sr|ss|st|su|sv|sx|sy|sz|tc|td|tf|tg|th|tj|tk|tl|tm|tn|to|tp|tr|tt|tv|tw|tz|ua|ug|uk|us|uy|uz|va|vc|ve|vg|vi|vn|vu|wf|ws|ye|yt|yu|za|zm|zw)/)(?:[^\s()<>{}\[\]]+|\([^\s()]*?\([^\s()]+\)[^\s()]*?\)|\([^\s]+?\))+(?:\([^\s()]*?\([^\s()]+\)[^\s()]*?\)|\([^\s]+?\)|[^\s`!()\[\]{};:'".,<>?«»“”‘’])|(?:(?<!@)[a-z0-9]+(?:[.\-][a-z0-9]+)*[.](?:com|net|org|edu|gov|mil|aero|asia|biz|cat|coop|info|int|jobs|mobi|museum|name|post|pro|tel|travel|xxx|ac|ad|ae|af|ag|ai|al|am|an|ao|aq|ar|as|at|au|aw|ax|az|ba|bb|bd|be|bf|bg|bh|bi|bj|bm|bn|bo|br|bs|bt|bv|bw|by|bz|ca|cc|cd|cf|cg|ch|ci|ck|cl|cm|cn|co|cr|cs|cu|cv|cx|cy|cz|dd|de|dj|dk|dm|do|dz|ec|ee|eg|eh|er|es|et|eu|fi|fj|fk|fm|fo|fr|ga|gb|gd|ge|gf|gg|gh|gi|gl|gm|gn|gp|gq|gr|gs|gt|gu|gw|gy|hk|hm|hn|hr|ht|hu|id|ie|il|im|in|io|iq|ir|is|it|je|jm|jo|jp|ke|kg|kh|ki|km|kn|kp|kr|kw|ky|kz|la|lb|lc|li|lk|lr|ls|lt|lu|lv|ly|ma|mc|md|me|mg|mh|mk|ml|mm|mn|mo|mp|mq|mr|ms|mt|mu|mv|mw|mx|my|mz|na|nc|ne|nf|ng|ni|nl|no|np|nr|nu|nz|om|pa|pe|pf|pg|ph|pk|pl|pm|pn|pr|ps|pt|pw|py|qa|re|ro|rs|ru|rw|sa|sb|sc|sd|se|sg|sh|si|sj|Ja|sk|sl|sm|sn|so|sr|ss|st|su|sv|sx|sy|sz|tc|td|tf|tg|th|tj|tk|tl|tm|tn|to|tp|tr|tt|tv|tw|tz|ua|ug|uk|us|uy|uz|va|vc|ve|vg|vi|vn|vu|wf|ws|ye|yt|yu|za|zm|zw)\b/?(?!@)))"""  # Gruber URL regex; the closing alternation was garbled in the diff and is reconstructed here
+)
+
+
+# NOTE: the def line below was garbled in the diff; the name and default length
+# are assumptions, the body is original.
+def trim_if_needed(text, default=100):
+    if len(text) > default:
+        text = text[:default]
+        text += "..."
+    return text
+
+
+class NoDefaultProvided(object):
+ pass
+
+
+def getattrd(obj, name, default=NoDefaultProvided):
+ """
+ Same as getattr(), but allows dot notation lookup
+ Discussed in:
+ http://stackoverflow.com/questions/11975781
+ """
+
+ try:
+ return reduce(getattr, name.split("."), obj)
+ except AttributeError as e:
+ if default != NoDefaultProvided:
+ return default
+ raise e
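+
+
+# e.g. getattrd(alert_group, "channel.organization", default=None) walks the dotted
+# attribute path (names hypothetical) and falls back to the default on AttributeError.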
diff --git a/engine/conftest.py b/engine/conftest.py
new file mode 100644
index 0000000000..b6001532c6
--- /dev/null
+++ b/engine/conftest.py
@@ -0,0 +1,678 @@
+import json
+import sys
+import uuid
+from importlib import import_module, reload
+
+import pytest
+from django.db.models.signals import post_save
+from django.urls import clear_url_caches
+from pytest_factoryboy import register
+from rest_framework.test import APIClient
+from telegram import Bot
+
+from apps.alerts.models import (
+ Alert,
+ AlertGroupLogRecord,
+ AlertReceiveChannel,
+ MaintainableObject,
+ ResolutionNote,
+ listen_for_alert_model_save,
+ listen_for_alertgrouplogrecord,
+ listen_for_alertreceivechannel_model_save,
+)
+from apps.alerts.signals import user_notification_action_triggered_signal
+from apps.alerts.tests.factories import (
+ AlertFactory,
+ AlertGroupFactory,
+ AlertGroupLogRecordFactory,
+ AlertReceiveChannelFactory,
+ ChannelFilterFactory,
+ CustomActionFactory,
+ EscalationChainFactory,
+ EscalationPolicyFactory,
+ InvitationFactory,
+ ResolutionNoteFactory,
+ ResolutionNoteSlackMessageFactory,
+)
+from apps.auth_token.models import ApiAuthToken, PluginAuthToken
+from apps.base.models.user_notification_policy_log_record import (
+ UserNotificationPolicyLogRecord,
+ listen_for_usernotificationpolicylogrecord_model_save,
+)
+from apps.base.tests.factories import (
+ LiveSettingFactory,
+ OrganizationLogRecordFactory,
+ UserNotificationPolicyFactory,
+ UserNotificationPolicyLogRecordFactory,
+)
+from apps.heartbeat.tests.factories import IntegrationHeartBeatFactory
+from apps.schedules.tests.factories import (
+ CustomOnCallShiftFactory,
+ OnCallScheduleCalendarFactory,
+ OnCallScheduleFactory,
+ OnCallScheduleICalFactory,
+)
+from apps.slack.slack_client import SlackClientWithErrorHandling
+from apps.slack.tests.factories import (
+ SlackActionRecordFactory,
+ SlackChannelFactory,
+ SlackMessageFactory,
+ SlackTeamIdentityFactory,
+ SlackUserGroupFactory,
+ SlackUserIdentityFactory,
+)
+from apps.telegram.tests.factories import (
+ TelegramChannelFactory,
+ TelegramChannelVerificationCodeFactory,
+ TelegramMessageFactory,
+ TelegramToUserConnectorFactory,
+ TelegramVerificationCodeFactory,
+)
+from apps.twilioapp.tests.factories import PhoneCallFactory, SMSFactory
+from apps.user_management.organization_log_creator import OrganizationLogType
+from apps.user_management.tests.factories import OrganizationFactory, TeamFactory, UserFactory
+from common.constants.role import Role
+
+register(OrganizationFactory)
+register(UserFactory)
+register(TeamFactory)
+
+register(OrganizationLogRecordFactory)
+
+register(AlertReceiveChannelFactory)
+register(ChannelFilterFactory)
+register(EscalationPolicyFactory)
+register(OnCallScheduleICalFactory)
+register(OnCallScheduleCalendarFactory)
+register(CustomOnCallShiftFactory)
+register(AlertFactory)
+register(AlertGroupFactory)
+register(AlertGroupLogRecordFactory)
+register(InvitationFactory)
+register(CustomActionFactory)
+register(SlackUserGroupFactory)
+
+register(SlackUserIdentityFactory)
+register(SlackTeamIdentityFactory)
+register(SlackMessageFactory)
+register(SlackActionRecordFactory)
+
+register(TelegramToUserConnectorFactory)
+register(TelegramChannelFactory)
+register(TelegramVerificationCodeFactory)
+register(TelegramChannelVerificationCodeFactory)
+register(TelegramMessageFactory)
+
+register(ResolutionNoteSlackMessageFactory)
+
+register(PhoneCallFactory)
+register(SMSFactory)
+# register(EmailMessageFactory)
+
+register(IntegrationHeartBeatFactory)
+
+register(LiveSettingFactory)
+
+
+@pytest.fixture(autouse=True)
+def mock_slack_api_call(monkeypatch):
+ def mock_api_call(*args, **kwargs):
+ return {
+ "status": 200,
+ "usergroups": [],
+ "channel": {"id": "TEST_CHANNEL_ID"},
+ "user": {
+ "name": "TEST_SLACK_LOGIN",
+ "real_name": "TEST_SLACK_NAME",
+ "profile": {"image_512": "TEST_SLACK_IMAGE"},
+ },
+ "team": {"name": "TEST_TEAM"},
+ }
+
+ monkeypatch.setattr(SlackClientWithErrorHandling, "api_call", mock_api_call)
+
+
+@pytest.fixture(autouse=True)
+def mock_telegram_bot_username(monkeypatch):
+ def mock_username(*args, **kwargs):
+ return "amixr_bot"
+
+ monkeypatch.setattr(Bot, "username", mock_username)
+
+
+@pytest.fixture
+def make_organization():
+ def _make_organization(**kwargs):
+ organization = OrganizationFactory(**kwargs)
+
+ return organization
+
+ return _make_organization
+
+
+@pytest.fixture
+def make_user_for_organization():
+ def _make_user_for_organization(organization, role=Role.ADMIN, **kwargs):
+ user = UserFactory(organization=organization, role=role, **kwargs)
+ return user
+
+ return _make_user_for_organization
+
+
+@pytest.fixture
+def make_token_for_organization():
+ def _make_token_for_organization(organization):
+ return PluginAuthToken.create_auth_token(organization)
+
+ return _make_token_for_organization
+
+
+@pytest.fixture
+def make_public_api_token():
+ def _make_public_api_token(user, organization, name="test_api_token"):
+ return ApiAuthToken.create_auth_token(user, organization, name)
+
+ return _make_public_api_token
+
+
+@pytest.fixture
+def make_user_auth_headers():
+ def _make_user_auth_headers(user, token):
+ return {
+ "HTTP_X-Instance-Context": json.dumps(
+ {"stack_id": user.organization.stack_id, "org_id": user.organization.org_id}
+ ),
+ "HTTP_X-Grafana-Context": json.dumps({"UserId": user.user_id}),
+ "HTTP_AUTHORIZATION": f"{token}",
+ }
+
+ return _make_user_auth_headers
+
+
+@pytest.fixture
+def make_user():
+ def _make_user(role=Role.ADMIN, **kwargs):
+ user = UserFactory(role=role, **kwargs)
+
+ return user
+
+ return _make_user
+
+
+@pytest.fixture
+def make_organization_and_user(make_organization, make_user_for_organization):
+ def _make_organization_and_user(role=Role.ADMIN):
+ organization = make_organization()
+ user = make_user_for_organization(organization=organization, role=role)
+ return organization, user
+
+ return _make_organization_and_user
+
+
+@pytest.fixture
+def make_organization_and_user_with_slack_identities(
+ make_organization_with_slack_team_identity, make_user_with_slack_user_identity
+):
+ def _make_organization_and_user_with_slack_identities(role=Role.ADMIN):
+ organization, slack_team_identity = make_organization_with_slack_team_identity()
+ user, slack_user_identity = make_user_with_slack_user_identity(slack_team_identity, organization, role=role)
+
+ return organization, user, slack_team_identity, slack_user_identity
+
+ return _make_organization_and_user_with_slack_identities
+
+
+@pytest.fixture
+def make_user_with_slack_user_identity():
+ def _make_slack_user_identity_with_user(slack_team_identity, organization, role=Role.ADMIN, **kwargs):
+ slack_user_identity = SlackUserIdentityFactory(
+ slack_team_identity=slack_team_identity,
+ **kwargs,
+ )
+ user = UserFactory(slack_user_identity=slack_user_identity, organization=organization, role=role)
+ return user, slack_user_identity
+
+ return _make_slack_user_identity_with_user
+
+
+@pytest.fixture
+def make_organization_with_slack_team_identity(make_slack_team_identity):
+ def _make_slack_team_identity_with_organization(**kwargs):
+ slack_team_identity = make_slack_team_identity(**kwargs)
+ organization = OrganizationFactory(slack_team_identity=slack_team_identity)
+ return organization, slack_team_identity
+
+ return _make_slack_team_identity_with_organization
+
+
+@pytest.fixture
+def make_slack_team_identity():
+ def _make_slack_team_identity(**kwargs):
+ slack_team_identity = SlackTeamIdentityFactory(**kwargs)
+ return slack_team_identity
+
+ return _make_slack_team_identity
+
+
+@pytest.fixture
+def make_slack_user_identity():
+ def _make_slack_user_identity(**kwargs):
+ slack_user_identity = SlackUserIdentityFactory(**kwargs)
+ return slack_user_identity
+
+ return _make_slack_user_identity
+
+
+@pytest.fixture
+def make_slack_message():
+ def _make_slack_message(alert_group, **kwargs):
+ organization = alert_group.channel.organization
+ slack_message = SlackMessageFactory(
+ alert_group=alert_group,
+ organization=organization,
+ _slack_team_identity=organization.slack_team_identity,
+ **kwargs,
+ )
+ return slack_message
+
+ return _make_slack_message
+
+
+@pytest.fixture
+def make_slack_action_record():
+ def _make_slack_action_record(organization, user, **kwargs):
+ return SlackActionRecordFactory(organization=organization, user=user, **kwargs)
+
+ return _make_slack_action_record
+
+
+@pytest.fixture
+def client_with_user():
+ def _client_with_user(user):
+ """The client with logged in user"""
+
+ client = APIClient()
+ client.force_login(user)
+
+ return client
+
+ return _client_with_user
+
+
+@pytest.fixture
+def make_team():
+ def _make_team(organization, **kwargs):
+ team = TeamFactory(organization=organization, **kwargs)
+ return team
+
+ return _make_team
+
+
+@pytest.fixture
+def make_alert_receive_channel():
+ def _make_alert_receive_channel(organization, **kwargs):
+ if "integration" not in kwargs:
+ kwargs["integration"] = AlertReceiveChannel.INTEGRATION_GRAFANA
+ post_save.disconnect(listen_for_alertreceivechannel_model_save, sender=AlertReceiveChannel)
+ alert_receive_channel = AlertReceiveChannelFactory(organization=organization, **kwargs)
+ post_save.connect(listen_for_alertreceivechannel_model_save, sender=AlertReceiveChannel)
+ return alert_receive_channel
+
+ return _make_alert_receive_channel
+
+
+@pytest.fixture
+def make_channel_filter():
+ def _make_channel_filter(alert_receive_channel, filtering_term=None, **kwargs):
+ channel_filter = ChannelFilterFactory(
+ filtering_term=filtering_term,
+ alert_receive_channel=alert_receive_channel,
+ **kwargs,
+ )
+ return channel_filter
+
+ return _make_channel_filter
+
+
+@pytest.fixture
+def make_channel_filter_with_post_save():
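+    # NOTE: currently identical to make_channel_filter; judging by the name, the
+    # intent is presumably to keep post_save signal handling enabled for tests that
+    # exercise signal-driven code paths.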
+ def _make_channel_filter(alert_receive_channel, filtering_term=None, **kwargs):
+ channel_filter = ChannelFilterFactory(
+ filtering_term=filtering_term,
+ alert_receive_channel=alert_receive_channel,
+ **kwargs,
+ )
+ return channel_filter
+
+ return _make_channel_filter
+
+
+@pytest.fixture
+def make_escalation_chain():
+ def _make_escalation_chain(organization, **kwargs):
+ escalation_chain = EscalationChainFactory(organization=organization, **kwargs)
+ return escalation_chain
+
+ return _make_escalation_chain
+
+
+@pytest.fixture
+def make_escalation_policy():
+ def _make_escalation_policy(escalation_chain, escalation_policy_step, **kwargs):
+ escalation_policy = EscalationPolicyFactory(
+ escalation_chain=escalation_chain, step=escalation_policy_step, **kwargs
+ )
+ return escalation_policy
+
+ return _make_escalation_policy
+
+
+@pytest.fixture
+def make_user_notification_policy():
+ def _make_user_notification_policy(user, step, **kwargs):
+ user_notification_policy = UserNotificationPolicyFactory(user=user, step=step, **kwargs)
+ return user_notification_policy
+
+ return _make_user_notification_policy
+
+
+@pytest.fixture
+def make_user_notification_policy_log_record():
+ def _make_user_notification_policy_log_record(**kwargs):
+ post_save.disconnect(
+ listen_for_usernotificationpolicylogrecord_model_save, sender=UserNotificationPolicyLogRecord
+ )
+ user_notification_policy_log_record = UserNotificationPolicyLogRecordFactory(**kwargs)
+ post_save.connect(listen_for_usernotificationpolicylogrecord_model_save, sender=UserNotificationPolicyLogRecord)
+
+ return user_notification_policy_log_record
+
+ return _make_user_notification_policy_log_record
+
+
+@pytest.fixture
+def make_integration_escalation_chain_route_escalation_policy(
+ make_alert_receive_channel,
+ make_escalation_chain,
+ make_channel_filter,
+ make_escalation_policy,
+):
+ def _make_integration_escalation_chain_route_escalation_policy(organization, escalation_policy_step):
+ alert_receive_channel = make_alert_receive_channel(organization)
+ escalation_chain = make_escalation_chain(organization)
+ default_channel_filter = make_channel_filter(
+ alert_receive_channel, escalation_chain=escalation_chain, is_default=True
+ )
+ escalation_policy = make_escalation_policy(escalation_chain, escalation_policy_step)
+
+ return alert_receive_channel, escalation_chain, default_channel_filter, escalation_policy
+
+ return _make_integration_escalation_chain_route_escalation_policy
+
+
+@pytest.fixture
+def make_invitation():
+ def _make_invitation(alert_group, author, invitee, **kwargs):
+ invitation = InvitationFactory(alert_group=alert_group, author=author, invitee=invitee, **kwargs)
+ return invitation
+
+ return _make_invitation
+
+
+@pytest.fixture
+def make_schedule():
+ def _make_schedule(organization, schedule_class, **kwargs):
+ factory = OnCallScheduleFactory.get_factory_for_class(schedule_class)
+ schedule = factory(organization=organization, **kwargs)
+ return schedule
+
+ return _make_schedule
+
+
+@pytest.fixture
+def make_on_call_shift():
+ def _make_on_call_shift(organization, shift_type, **kwargs):
+ on_call_shift = CustomOnCallShiftFactory(organization=organization, type=shift_type, **kwargs)
+ return on_call_shift
+
+ return _make_on_call_shift
+
+
+@pytest.fixture
+def make_alert_group():
+ def _make_alert_group(alert_receive_channel, **kwargs):
+ alert_group = AlertGroupFactory(channel=alert_receive_channel, **kwargs)
+ return alert_group
+
+ return _make_alert_group
+
+
+@pytest.fixture
+def make_alert_group_log_record():
+ def _make_alert_group_log_record(alert_group, type, author, **kwargs):
+ post_save.disconnect(listen_for_alertgrouplogrecord, sender=AlertGroupLogRecord)
+ log_record = AlertGroupLogRecordFactory(alert_group=alert_group, type=type, author=author, **kwargs)
+ post_save.connect(listen_for_alertgrouplogrecord, sender=AlertGroupLogRecord)
+ return log_record
+
+ return _make_alert_group_log_record
+
+
+@pytest.fixture
+def make_resolution_note():
+ def _make_resolution_note(alert_group, source=ResolutionNote.Source.WEB, author=None, **kwargs):
+ resolution_note = ResolutionNoteFactory(alert_group=alert_group, source=source, author=author, **kwargs)
+ return resolution_note
+
+ return _make_resolution_note
+
+
+@pytest.fixture
+def make_resolution_note_slack_message():
+ def _make_resolution_note_slack_message(alert_group, user, added_by_user, **kwargs):
+ return ResolutionNoteSlackMessageFactory(
+ alert_group=alert_group, user=user, added_by_user=added_by_user, **kwargs
+ )
+
+ return _make_resolution_note_slack_message
+
+
+@pytest.fixture
+def make_alert():
+ def _make_alert(alert_group, raw_request_data, **kwargs):
+ post_save.disconnect(listen_for_alert_model_save, sender=Alert)
+ alert = AlertFactory(group=alert_group, raw_request_data=raw_request_data, **kwargs)
+ post_save.connect(listen_for_alert_model_save, sender=Alert)
+ return alert
+
+ return _make_alert
+
+
+@pytest.fixture
+def make_alert_with_custom_create_method():
+ def _make_alert_with_custom_create_method(
+ title,
+ message,
+ image_url,
+ link_to_upstream_details,
+ alert_receive_channel,
+ integration_unique_data,
+ raw_request_data,
+ **kwargs,
+ ):
+ post_save.disconnect(listen_for_alert_model_save, sender=Alert)
+ alert = Alert.create(
+ title,
+ message,
+ image_url,
+ link_to_upstream_details,
+ alert_receive_channel,
+ integration_unique_data,
+ raw_request_data,
+ **kwargs,
+ )
+ post_save.connect(listen_for_alert_model_save, sender=Alert)
+ return alert
+
+ return _make_alert_with_custom_create_method
+
+
+@pytest.fixture
+def make_custom_action():
+ def _make_custom_action(organization, **kwargs):
+ custom_action = CustomActionFactory(organization=organization, **kwargs)
+ return custom_action
+
+ return _make_custom_action
+
+
+@pytest.fixture
+def make_slack_user_group():
+ def _make_slack_user_group(slack_team_identity, **kwargs):
+ slack_user_group = SlackUserGroupFactory(slack_team_identity=slack_team_identity, **kwargs)
+ return slack_user_group
+
+ return _make_slack_user_group
+
+
+@pytest.fixture
+def make_slack_channel():
+ def _make_slack_channel(slack_team_identity, **kwargs):
+        slack_channel = SlackChannelFactory(slack_team_identity=slack_team_identity, **kwargs)
+        return slack_channel
+
+ return _make_slack_channel
+
+
+@pytest.fixture()
+def mock_start_disable_maintenance_task(monkeypatch):
+ def mocked_start_disable_maintenance_task(*args, **kwargs):
+ return uuid.uuid4()
+
+ monkeypatch.setattr(MaintainableObject, "start_disable_maintenance_task", mocked_start_disable_maintenance_task)
+
+
+@pytest.fixture()
+def make_organization_and_user_with_plugin_token(make_organization_and_user, make_token_for_organization):
+ def _make_organization_and_user_with_plugin_token(role=Role.ADMIN):
+ organization, user = make_organization_and_user(role=role)
+ _, token = make_token_for_organization(organization)
+
+ return organization, user, token
+
+ return _make_organization_and_user_with_plugin_token
+
+
+@pytest.fixture()
+def mock_send_user_notification_signal(monkeypatch):
+ def mocked_send_signal(*args, **kwargs):
+ return None
+
+ monkeypatch.setattr(user_notification_action_triggered_signal, "send", mocked_send_signal)
+
+
+@pytest.fixture()
+def make_telegram_user_connector():
+ def _make_telegram_user_connector(user, **kwargs):
+ return TelegramToUserConnectorFactory(user=user, **kwargs)
+
+ return _make_telegram_user_connector
+
+
+@pytest.fixture()
+def make_telegram_channel():
+ def _make_telegram_channel(organization, is_default_channel=False):
+ return TelegramChannelFactory(organization=organization, is_default_channel=is_default_channel)
+
+ return _make_telegram_channel
+
+
+@pytest.fixture()
+def make_telegram_verification_code():
+ def _make_telegram_verification_code(user, **kwargs):
+ return TelegramVerificationCodeFactory(user=user, **kwargs)
+
+ return _make_telegram_verification_code
+
+
+@pytest.fixture()
+def make_telegram_channel_verification_code():
+ def _make_telegram_channel_verification_code(organization, author, **kwargs):
+ return TelegramChannelVerificationCodeFactory(organization=organization, author=author, **kwargs)
+
+ return _make_telegram_channel_verification_code
+
+
+@pytest.fixture()
+def make_telegram_message():
+ def _make_telegram_message(alert_group, message_type, **kwargs):
+ return TelegramMessageFactory(alert_group=alert_group, message_type=message_type, **kwargs)
+
+ return _make_telegram_message
+
+
+@pytest.fixture()
+def make_phone_call():
+ def _make_phone_call(receiver, status, **kwargs):
+ return PhoneCallFactory(receiver=receiver, status=status, **kwargs)
+
+ return _make_phone_call
+
+
+@pytest.fixture()
+def make_sms():
+ def _make_sms(receiver, status, **kwargs):
+ return SMSFactory(receiver=receiver, status=status, **kwargs)
+
+ return _make_sms
+
+
+# TODO: restore email notifications
+# @pytest.fixture()
+# def make_email_message():
+# def _make_email_message(receiver, status, **kwargs):
+# return EmailMessageFactory(receiver=receiver, status=status, **kwargs)
+#
+# return _make_email_message
+
+
+@pytest.fixture()
+def make_live_setting():
+ def _make_live_setting(name, **kwargs):
+ return LiveSettingFactory(name=name, **kwargs)
+
+ return _make_live_setting
+
+
+@pytest.fixture()
+def make_integration_heartbeat():
+ def _make_integration_heartbeat(alert_receive_channel, timeout_seconds=60, last_heartbeat_time=None, **kwargs):
+ return IntegrationHeartBeatFactory(
+ alert_receive_channel=alert_receive_channel,
+ timeout_seconds=timeout_seconds,
+ last_heartbeat_time=last_heartbeat_time,
+ **kwargs,
+ )
+
+ return _make_integration_heartbeat
+
+
+@pytest.fixture()
+def make_organization_log_record():
+ def _make_organization_log_record(organization, user, **kwargs):
+ if "type" not in kwargs:
+ kwargs["type"] = OrganizationLogType.TYPE_SLACK_DEFAULT_CHANNEL_CHANGED
+ return OrganizationLogRecordFactory(organization=organization, author=user, **kwargs)
+
+ return _make_organization_log_record
+
+
+@pytest.fixture()
+def load_slack_urls(settings):
+ clear_url_caches()
+ settings.FEATURE_SLACK_INTEGRATION_ENABLED = True
+ urlconf = settings.ROOT_URLCONF
+ if urlconf in sys.modules:
+ reload(sys.modules[urlconf])
+ else:
+ import_module(urlconf)
diff --git a/engine/engine/__init__.py b/engine/engine/__init__.py
new file mode 100644
index 0000000000..0165ba0dd4
--- /dev/null
+++ b/engine/engine/__init__.py
@@ -0,0 +1,7 @@
+from __future__ import absolute_import, unicode_literals
+
+# This will make sure the app is always imported when
+# Django starts so that shared_task will use this app.
+from .celery import app as celery_app
+
+__all__ = ("celery_app",)
diff --git a/engine/engine/celery.py b/engine/engine/celery.py
new file mode 100644
index 0000000000..d21e968cb7
--- /dev/null
+++ b/engine/engine/celery.py
@@ -0,0 +1,43 @@
+import os
+
+import celery
+from celery.app.log import TaskFormatter
+
+# set the default Django settings module for the 'celery' program.
+os.environ.setdefault("DJANGO_SETTINGS_MODULE", "settings.prod")
+
+from django.db import connection # noqa: E402
+
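+# Opening a cursor here forces a database connection at import time, presumably to
+# fail fast on startup if the database is unreachable.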
+connection.cursor()
+from celery import Celery # noqa: E402
+
+app = Celery("proj")
+
+# Using a string here means the worker doesn't have to serialize
+# the configuration object to child processes.
+# - namespace='CELERY' means all celery-related configuration keys
+# should have a `CELERY_` prefix.
+app.config_from_object("django.conf:settings", namespace="CELERY")
+
+# Load task modules from all registered Django app configs.
+app.autodiscover_tasks()
+
+
+# This task is required for tests with celery, see:
+# https://stackoverflow.com/questions/46530784/make-django-test-case-database-visible-to-celery
+@app.task(name="celery.ping")
+def ping():
+ # type: () -> str
+ """Simple task that just returns 'pong'."""
+ return "pong"
+
+
+@celery.signals.after_setup_logger.connect
+@celery.signals.after_setup_task_logger.connect
+def on_after_setup_logger(logger, **kwargs):
+ for handler in logger.handlers:
+ handler.setFormatter(
+ TaskFormatter(
+ "%(asctime)s source=engine:celery task_id=%(task_id)s task_name=%(task_name)s name=%(name)s level=%(levelname)s %(message)s"
+ )
+ )
diff --git a/engine/engine/logging/formatters.py b/engine/engine/logging/formatters.py
new file mode 100644
index 0000000000..9ad7f029a9
--- /dev/null
+++ b/engine/engine/logging/formatters.py
@@ -0,0 +1,18 @@
+from django.conf import settings
+from pythonjsonlogger import jsonlogger
+
+
+class CustomStackdriverJsonFormatter(jsonlogger.JsonFormatter):
+ def add_fields(self, log_record, record, message_dict):
+ super(CustomStackdriverJsonFormatter, self).add_fields(log_record, record, message_dict)
+ if (
+ settings.GCP_PROJECT_ID
+ and log_record["request_id"] is not None
+ and len(log_record["request_id"].split("/")) == 2
+ ):
+ trace = log_record["request_id"].split("/")
+ log_record["logging.googleapis.com/trace"] = f"projects/{settings.GCP_PROJECT_ID}/traces/{trace[0]}"
+ if "levelname" in log_record:
+ log_record["severity"] = log_record["levelname"]
+ if "exc_info" in log_record:
+ log_record["@type"] = "type.googleapis.com/google.devtools.clouderrorreporting.v1beta1.ReportedErrorEvent"
diff --git a/engine/engine/management/commands/issue_invite_for_the_frontend.py b/engine/engine/management/commands/issue_invite_for_the_frontend.py
new file mode 100644
index 0000000000..927a2a9fe9
--- /dev/null
+++ b/engine/engine/management/commands/issue_invite_for_the_frontend.py
@@ -0,0 +1,45 @@
+from django.apps import apps
+from django.core.management.base import BaseCommand
+
+from apps.auth_token import crypto
+
+
+class Command(BaseCommand):
+ def add_arguments(self, parser):
+ parser.add_argument(
+ "--override",
+ action="store_true",
+ help="Allow overriding of existing invites.",
+ )
+
+ def handle(self, *args, **options):
+ self.stdout.write("-------------------------")
+ self.stdout.write("👋 This script will issue an invite token to securely connect the frontend.")
+        self.stdout.write(
+            "Maintainers will be happy to help in the Slack channel #grafana-oncall: https://slack.grafana.com/"
+        )
+
+ DynamicSetting = apps.get_model("base", "DynamicSetting")
+ self_hosted_settings = DynamicSetting.objects.get_or_create(
+ name="self_hosted_invitations",
+ defaults={
+ "json_value": {
+ "keys": [],
+ }
+ },
+ )[0]
+
+ if options["override"]:
+ self_hosted_settings.json_value["keys"] = []
+ else:
+ if len(self_hosted_settings.json_value["keys"]) > 0:
+            self.stdout.write(
+                "Whoops, there is already an active invite in the DB. Override it with the --override argument."
+            )
+ return 0
+
+ invite_token = crypto.generate_token_string()
+ self_hosted_settings.json_value["keys"].append(invite_token)
+ self_hosted_settings.save(update_fields=["json_value"])
+
+ self.stdout.write(f"Your invite token: \033[31m{invite_token}\033[39m , use it in the Grafana OnCall plugin.")
diff --git a/engine/engine/management/commands/restart_escalation.py b/engine/engine/management/commands/restart_escalation.py
new file mode 100644
index 0000000000..6c4c082398
--- /dev/null
+++ b/engine/engine/management/commands/restart_escalation.py
@@ -0,0 +1,93 @@
+from celery import uuid as celery_uuid
+from django.core.management import BaseCommand
+from django.db.models import Q
+from django.utils import timezone
+
+from apps.alerts.models import AlertGroup, AlertReceiveChannel
+from apps.alerts.tasks import escalate_alert_group, unsilence_task
+
+
+class Command(BaseCommand):
+ def add_arguments(self, parser):
+ group = parser.add_mutually_exclusive_group(required=True)
+
+ group.add_argument("--alert_group_ids", type=int, nargs="+", help="Alert group IDs to restart escalation for.")
+ group.add_argument(
+ "--all", action="store_true", help="Restart escalation for all alert groups with unfinished escalation."
+ )
+
+ def handle(self, *args, **options):
+ alert_group_ids = options["alert_group_ids"]
+ restart_all = options["all"]
+
+ if restart_all:
+ alert_groups = AlertGroup.all_objects.filter(
+ ~Q(channel__integration=AlertReceiveChannel.INTEGRATION_MAINTENANCE),
+                ~Q(silenced=True, silenced_until__isnull=True),  # filter out alert groups that are silenced forever
+ Q(Q(is_escalation_finished=False) | Q(silenced_until__isnull=False)),
+ resolved=False,
+ acknowledged=False,
+ root_alert_group=None,
+ )
+ else:
+ alert_groups = AlertGroup.all_objects.filter(
+ pk__in=alert_group_ids,
+ )
+
+ if not alert_groups:
+ self.stdout.write("No escalations to restart.")
+ return
+
+ tasks = []
+ alert_groups_to_update = []
+ now = timezone.now()
+
+ for alert_group in alert_groups:
+ task_id = celery_uuid()
+ # if incident was silenced, start unsilence_task
+ if alert_group.is_silenced_for_period:
+ alert_group.unsilence_task_uuid = task_id
+
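+                # Resume at the originally scheduled unsilence time, or immediately if
+                # that time has already passed.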
+ escalation_start_time = max(now, alert_group.silenced_until)
+ alert_group.estimate_escalation_finish_time = alert_group.calculate_eta_for_finish_escalation(
+ start_time=escalation_start_time,
+ )
+ alert_groups_to_update.append(alert_group)
+
+ tasks.append(
+ unsilence_task.signature(
+ args=(alert_group.pk,),
+ immutable=True,
+ task_id=task_id,
+ eta=escalation_start_time,
+ )
+ )
+ # otherwise start escalate_alert_group task
+ else:
+ if alert_group.escalation_snapshot:
+ alert_group.estimate_escalation_finish_time = alert_group.calculate_eta_for_finish_escalation(
+ escalation_started=True,
+ )
+ alert_group.active_escalation_id = task_id
+ alert_groups_to_update.append(alert_group)
+
+ tasks.append(
+ escalate_alert_group.signature(
+ args=(alert_group.pk,),
+ immutable=True,
+ task_id=task_id,
+ eta=alert_group.next_step_eta,
+ )
+ )
+
+ AlertGroup.all_objects.bulk_update(
+ alert_groups_to_update,
+ ["estimate_escalation_finish_time", "active_escalation_id", "unsilence_task_uuid"],
+ batch_size=5000,
+ )
+
+ for task in tasks:
+ task.apply_async()
+
+ restarted_alert_group_ids = ", ".join(str(alert_group.pk) for alert_group in alert_groups)
+ self.stdout.write("Escalations restarted for alert groups: {}".format(restarted_alert_group_ids))
diff --git a/engine/engine/management/commands/start_celery.py b/engine/engine/management/commands/start_celery.py
new file mode 100644
index 0000000000..5c93503638
--- /dev/null
+++ b/engine/engine/management/commands/start_celery.py
@@ -0,0 +1,22 @@
+import shlex
+import subprocess
+
+from django.core.management.base import BaseCommand
+from django.utils import autoreload
+
+WORKER_ID = 0
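+# Incremented on every autoreload restart so each respawned worker gets a unique
+# node name via -n; otherwise celery may warn about a duplicate nodename.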
+
+
+def restart_celery(*args, **kwargs):
+ global WORKER_ID
+ kill_worker_cmd = "celery -A engine control shutdown"
+ subprocess.call(shlex.split(kill_worker_cmd))
+ start_worker_cmd = "celery -A engine worker -l info --concurrency=20 -Q celery,retry -n {}".format(WORKER_ID)
+ subprocess.call(shlex.split(start_worker_cmd))
+    WORKER_ID += 1
+
+
+class Command(BaseCommand):
+ def handle(self, *args, **options):
+ self.stdout.write("Starting celery worker with autoreload...")
+ autoreload.run_with_reloader(restart_celery, args=None, kwargs=None)
diff --git a/engine/engine/middlewares.py b/engine/engine/middlewares.py
new file mode 100644
index 0000000000..90c1e1305b
--- /dev/null
+++ b/engine/engine/middlewares.py
@@ -0,0 +1,93 @@
+import datetime
+import logging
+
+from django.apps import apps
+from django.conf import settings
+from django.core.exceptions import PermissionDenied, RequestDataTooBig
+from django.db import OperationalError
+from django.http import HttpResponse
+from django.utils.deprecation import MiddlewareMixin
+
+logger = logging.getLogger(__name__)
+
+
+class RequestTimeLoggingMiddleware(MiddlewareMixin):
+ @staticmethod
+ def log_message(request, response, tag, message=""):
+ dt = datetime.datetime.utcnow()
+ if not hasattr(request, "_logging_start_dt"):
+ request._logging_start_dt = dt
+ if request.path.startswith("/integrations/v1"):
+ logging.info(f"Start calculating latency for {request.path}")
+ else:
+ seconds = (dt - request._logging_start_dt).total_seconds()
+ status_code = 0 if response is None else response.status_code
+ content_length = request.headers.get("content-length", default=0)
+ integration_type = "N/A"
+ integration_token = "N/A"
+ if request.path.startswith("/integrations/v1"):
+ split_path = request.path.split("/")
+ integration_type = split_path[3]
+ integration_token = split_path[4]
+            logger.info(
+ "inbound "
+ f"latency={str(seconds)} status={status_code} method={request.method} path={request.path} "
+ f"content-length={content_length} slow={int(seconds > settings.SLOW_THRESHOLD_SECONDS)} "
+ f"integration_type={integration_type} "
+ f"integration_token={integration_token}"
+ )
+
+ def process_request(self, request):
+ self.log_message(request, None, "request")
+
+ def process_response(self, request, response):
+ self.log_message(request, response, "response")
+ return response
+
+
+class RequestBodyReadingMiddleware(MiddlewareMixin):
+ def process_request(self, request):
+ # Reading request body, as required by uwsgi
+ # https://uwsgi-docs.readthedocs.io/en/latest/ThingsToKnow.html
+ # "If an HTTP request has a body (like a POST request generated by a form),
+ # you have to read (consume) it in your application.
+ # If you do not do this, the communication socket with your webserver may be clobbered."
+ try:
+ request.body
+ except RequestDataTooBig:
+ return HttpResponse(status=400)
+
+
+class BanAlertConsumptionBasedOnSettingsMiddleware(MiddlewareMixin):
+ """
+    Ban requests to /integrations/v1 based on a dynamic setting.
+    Banning is best-effort and not guaranteed.
+ """
+
+ def is_banned(self, path):
+ try:
+ DynamicSetting = apps.get_model("base", "DynamicSetting")
+ banned_paths = DynamicSetting.objects.get_or_create(
+ name="ban_hammer_list",
+ defaults={
+ "json_value": [
+ "full_path_here",
+ ]
+ },
+ )[0]
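+            # json_value is expected to hold a list of banned path prefixes;
+            # "full_path_here" above is just a placeholder default.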
+            return any(path.startswith(p) for p in banned_paths.json_value)
+ except OperationalError:
+            # Fall back to make sure we consume the request even if the DB is down.
+            logger.info("Cannot connect to the database; assuming the request is not banned.")
+ return False
+
+ def process_request(self, request):
+ if request.path.startswith("/integrations/v1") and self.is_banned(request.path):
+ try:
+ # Consume request body since other middleware will be skipped
+ request.body
+ except Exception:
+ pass
+            logger.warning(f"{request.path} has been banned")
+ raise PermissionDenied()
diff --git a/engine/engine/parsers.py b/engine/engine/parsers.py
new file mode 100644
index 0000000000..2a37816a6a
--- /dev/null
+++ b/engine/engine/parsers.py
@@ -0,0 +1,48 @@
+from django.conf import settings
+from rest_framework import parsers, renderers
+
+
+def check_content_length(parser_context):
+ """Enforce DATA_UPLOAD_MAX_MEMORY_SIZE for json rest framework API requests."""
+ if parser_context and settings.DATA_UPLOAD_MAX_MEMORY_SIZE and "request" in parser_context:
+ try:
+ content_length = int(parser_context["request"].META.get("CONTENT_LENGTH", 0))
+ except (ValueError, TypeError):
+ content_length = 0
+
+        if (content_length and content_length > settings.DATA_UPLOAD_MAX_MEMORY_SIZE) or content_length < 0:
+ raise parsers.ParseError("RequestDataTooBig")
+
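+# Illustrative behaviour, assuming DATA_UPLOAD_MAX_MEMORY_SIZE = 5242880 (see
+# settings/base.py): a request declaring CONTENT_LENGTH = 6000000 is rejected with a
+# ParseError before parsing, while a missing or malformed header is treated as 0.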
+
+class JSONParser(parsers.JSONParser):
+ """
+ Parse JSON-serialized data.
+    Enforce the Django DATA_UPLOAD_MAX_MEMORY_SIZE setting.
+ """
+
+ media_type = "application/json"
+ renderer_class = renderers.JSONRenderer
+
+ def parse(self, stream, media_type=None, parser_context=None):
+ """Parse incoming bytestream as JSON and returns the resulting data."""
+ # see https://github.com/encode/django-rest-framework/issues/4760
+ check_content_length(parser_context)
+ return super(JSONParser, self).parse(stream, media_type, parser_context)
+
+
+class FormParser(parsers.FormParser):
+ """
+ Parse form data.
+    Enforce the Django DATA_UPLOAD_MAX_MEMORY_SIZE setting.
+ """
+
+ media_type = "application/x-www-form-urlencoded"
+
+ def parse(self, stream, media_type=None, parser_context=None):
+ """
+ Parses the incoming bytestream as a URL encoded form,
+ and returns the resulting QueryDict.
+ """
+ # see https://github.com/encode/django-rest-framework/issues/4760
+ check_content_length(parser_context)
+ return super(FormParser, self).parse(stream, media_type, parser_context)
diff --git a/engine/engine/urls.py b/engine/engine/urls.py
new file mode 100644
index 0000000000..9e55241aa1
--- /dev/null
+++ b/engine/engine/urls.py
@@ -0,0 +1,70 @@
+"""engine URL Configuration
+
+The `urlpatterns` list routes URLs to views. For more information please see:
+ https://docs.djangoproject.com/en/2.1/topics/http/urls/
+Examples:
+Function views
+ 1. Add an import: from my_app import views
+ 2. Add a URL to urlpatterns: path('', views.home, name='home')
+Class-based views
+ 1. Add an import: from other_app.views import Home
+ 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
+Including another URLconf
+ 1. Import the include() function: from django.urls import include, path
+ 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
+"""
+from django.conf import settings
+from django.conf.urls.static import static
+from django.contrib import admin
+from django.urls import include, path
+
+from .views import HealthCheckView, ReadinessCheckView, StartupProbeView
+
+urlpatterns = [
+ path("", HealthCheckView.as_view()),
+ path("health/", HealthCheckView.as_view()),
+ path("ready/", ReadinessCheckView.as_view()),
+ path("startupprobe/", StartupProbeView.as_view()),
+ # path('slow/', SlowView.as_view()),
+ # path('exception/', ExceptionView.as_view()),
+ path(settings.ONCALL_DJANGO_ADMIN_PATH, admin.site.urls),
+ path("api/gi/v1/", include("apps.api_for_grafana_incident.urls", namespace="api-gi")),
+ path("api/internal/v1/", include("apps.api.urls", namespace="api-internal")),
+ path("api/internal/v1/", include("social_django.urls", namespace="social")),
+ path("api/internal/v1/plugin/", include("apps.grafana_plugin.urls", namespace="grafana-plugin")),
+ path("api/internal/v1/", include("apps.grafana_plugin_management.urls", namespace="grafana-plugin-management")),
+ path("api/internal/v1/", include("apps.social_auth.urls", namespace="social_auth")),
+ path("integrations/v1/", include("apps.integrations.urls", namespace="integrations")),
+ path("twilioapp/", include("apps.twilioapp.urls")),
+ # path('sendgridapp/', include('apps.sendgridapp.urls')), TODO: restore email notifications
+ path("api/v1/", include("apps.public_api.urls", namespace="api-public")),
+ path("api/internal/v1/", include("apps.migration_tool.urls", namespace="migration-tool")),
+] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
+
+if settings.FEATURE_SLACK_INTEGRATION_ENABLED:
+    urlpatterns += [
+        path("api/internal/v1/slack/", include("apps.slack.urls")),
+        path("slack/", include("apps.slack.urls")),
+    ]
+
+if settings.FEATURE_TELEGRAM_INTEGRATION_ENABLED:
+    urlpatterns += [path("telegram/", include("apps.telegram.urls"))]
+
+if settings.OSS_INSTALLATION_FEATURES_ENABLED:
+ urlpatterns += [
+ path("api/internal/v1/", include("apps.oss_installation.urls")),
+ ]
+
+if settings.DEBUG:
+ import debug_toolbar
+
+ urlpatterns = [
+ path("__debug__/", include(debug_toolbar.urls)),
+ ] + urlpatterns
+
+
+admin.site.site_header = settings.ADMIN_SITE_HEADER
diff --git a/engine/engine/views.py b/engine/engine/views.py
new file mode 100644
index 0000000000..045c245e1a
--- /dev/null
+++ b/engine/engine/views.py
@@ -0,0 +1,70 @@
+import time
+
+from django.core.cache import cache
+from django.http import HttpResponse
+from django.views.generic import View
+
+from apps.integrations.mixins import AlertChannelDefiningMixin
+from common.custom_celery_tasks import shared_dedicated_queue_retry_task
+
+
+@shared_dedicated_queue_retry_task(ignore_result=True)
+def health_check_task():
+ return "Ok"
+
+
+class HealthCheckView(View):
+ """
+    This view is used in the k8s liveness probe.
+    k8s periodically makes requests to this view, and
+    if the requests fail, the container is restarted.
+ """
+
+ dangerously_bypass_middlewares = True
+
+ def get(self, request):
+ return HttpResponse("Ok")
+
+
+class ReadinessCheckView(View):
+ """
+    This view is used in the k8s readiness probe.
+    k8s periodically makes requests to this view, and
+    if the requests fail, the container stops receiving traffic.
+ """
+
+ dangerously_bypass_middlewares = True
+
+ def get(self, request):
+ return HttpResponse("Ok")
+
+
+class StartupProbeView(View):
+ """
+    This view is used in the k8s startup probe.
+    k8s makes requests to this view on startup, and
+    if the requests fail, the container is restarted.
+    It also caches alert receive channels if they are not cached yet, and checks the initial database connection.
+ """
+
+ dangerously_bypass_middlewares = True
+
+ def get(self, request):
+ if cache.get(AlertChannelDefiningMixin.CACHE_KEY_DB_FALLBACK) is None:
+ AlertChannelDefiningMixin().update_alert_receive_channel_cache()
+
+ cache.set("healthcheck", "healthcheck", 30) # Checking cache connectivity
+ assert cache.get("healthcheck") == "healthcheck"
+
+ return HttpResponse("Ok")
+
+
+class SlowView(View):
+ def get(self, request):
+ time.sleep(1.5)
+ return HttpResponse("Slept well.")
+
+
+class ExceptionView(View):
+ def get(self, request):
+ raise Exception("Trying exception!")
diff --git a/engine/engine/wsgi.py b/engine/engine/wsgi.py
new file mode 100644
index 0000000000..05f06afde7
--- /dev/null
+++ b/engine/engine/wsgi.py
@@ -0,0 +1,18 @@
+"""
+WSGI config for engine project.
+
+It exposes the WSGI callable as a module-level variable named ``application``.
+
+For more information on this file, see
+https://docs.djangoproject.com/en/2.1/howto/deployment/wsgi/
+"""
+
+import os
+
+from django.core.wsgi import get_wsgi_application
+from whitenoise import WhiteNoise
+
+os.environ.setdefault("DJANGO_SETTINGS_MODULE", "settings.prod")
+
+application = get_wsgi_application()
+application = WhiteNoise(application)
diff --git a/engine/manage.py b/engine/manage.py
new file mode 100755
index 0000000000..7912efd7f4
--- /dev/null
+++ b/engine/manage.py
@@ -0,0 +1,15 @@
+#!/usr/bin/env python
+import os
+import sys
+
+if __name__ == "__main__":
+ os.environ.setdefault("DJANGO_SETTINGS_MODULE", "settings.dev")
+ try:
+ from django.core.management import execute_from_command_line
+ except ImportError as exc:
+ raise ImportError(
+ "Couldn't import Django. Are you sure it's installed and "
+ "available on your PYTHONPATH environment variable? Did you "
+ "forget to activate a virtual environment?"
+ ) from exc
+ execute_from_command_line(sys.argv)
diff --git a/engine/pyproject.toml b/engine/pyproject.toml
new file mode 100644
index 0000000000..655ee3fdaf
--- /dev/null
+++ b/engine/pyproject.toml
@@ -0,0 +1,10 @@
+[tool.isort]
+profile = "black"
+line_length = 120
+py_version = 39
+extend_skip_glob = "**/migrations/**"
+
+[tool.black]
+line-length = 120
+target-version = ["py39"]
+force-exclude = "migrations"
diff --git a/engine/requirements.txt b/engine/requirements.txt
new file mode 100644
index 0000000000..a9dfc03d8d
--- /dev/null
+++ b/engine/requirements.txt
@@ -0,0 +1,41 @@
+django==3.2.5
+djangorestframework==3.12.4
+slackclient==1.3.0
+whitenoise==5.3.0
+twilio~=6.37.0
+phonenumbers==8.10.0
+django-ordered-model==3.1.1
+celery==4.3.0
+redis==3.2.0
+django-celery-results==1.0.4
+humanize==0.5.1
+django-mysql==2.4.1
+uwsgi==2.0.20
+django-cors-headers==3.7.0
+django-debug-toolbar==3.2.1
+django-sns-view==0.1.2
+kombu==4.5.0
+python-telegram-bot==11.1.0
+django-silk==4.1.0
+django-redis-cache==3.0.0
+hiredis==1.0.0
+django-ratelimit==2.0.0
+django-filter==2.4.0
+icalendar==4.0.7
+recurring-ical-events==0.1.16b0
+slack-export-viewer==1.0.0
+beautifulsoup4==4.8.1
+social-auth-app-django==3.1.0
+sendgrid==6.1.2
+cryptography==2.9.2
+pytest==5.4.3
+pytest-django==3.9.0
+pytest_factoryboy==2.0.3
+factory-boy<3.0
+python-json-logger==2.0.1
+django-log-request-id==1.6.0
+django-polymorphic==3.0.0
+django-rest-polymorphic==0.1.9
+pre-commit==2.15.0
+https://github.com/iskhakov/django-push-notifications/archive/refs/tags/2.0.0-hotfix-4.tar.gz
+django-mirage-field==1.3.0
diff --git a/engine/scripts/start_all_in_one.sh b/engine/scripts/start_all_in_one.sh
new file mode 100644
index 0000000000..f4a64e3988
--- /dev/null
+++ b/engine/scripts/start_all_in_one.sh
@@ -0,0 +1,34 @@
+#!/bin/bash
+
+export DJANGO_SETTINGS_MODULE=settings.all_in_one
+
+generate_value_if_not_exist ()
+{
+    if [ ! -f /etc/app/secret_data/$1 ]; then
+        touch /etc/app/secret_data/$1
+        base64 /dev/urandom | head -c $2 > /etc/app/secret_data/$1
+    fi
+    export $1=$(cat /etc/app/secret_data/$1)
+}
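+# For example, `generate_value_if_not_exist SECRET_KEY 75` writes 75 base64 characters
+# to /etc/app/secret_data/SECRET_KEY on first boot and re-exports the same value on
+# later boots, so secrets survive container restarts while the volume persists.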
+
+generate_value_if_not_exist SECRET_KEY 75
+
+generate_value_if_not_exist MIRAGE_SECRET_KEY 75
+generate_value_if_not_exist MIRAGE_CIPHER_IV 16
+
+export BASE_URL=http://localhost:8000
+
+echo "Starting redis in the background"
+# Redis will dump the changes to the volume every 60 seconds if at least 1 key changed
+redis-server --daemonize yes --save 60 1 --dir /etc/app/redis_data/
+echo "Running migrations"
+python manage.py migrate
+
+echo "Start celery"
+python manage.py start_celery &
+
+# Postponing token issuing to make sure it's the last record in the console.
+bash -c 'sleep 10; python manage.py issue_invite_for_the_frontend --override' &
+
+echo "Starting server"
+python manage.py runserver 0.0.0.0:8000 --noreload
diff --git a/engine/settings/__init__.py b/engine/settings/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/engine/settings/all_in_one.py b/engine/settings/all_in_one.py
new file mode 100644
index 0000000000..e2196274f1
--- /dev/null
+++ b/engine/settings/all_in_one.py
@@ -0,0 +1,58 @@
+import sys
+from random import randrange
+
+from .prod_without_db import * # noqa
+
+DATABASES = {
+ "default": {
+ "ENGINE": "django.db.backends.sqlite3",
+ "NAME": os.path.join(BASE_DIR, "sqlite_data/db.sqlite3"), # noqa
+ },
+}
+
+TESTING = "pytest" in sys.modules or "unittest" in sys.modules
+
+CACHES = {
+ "default": {
+ "BACKEND": "redis_cache.RedisCache",
+ "LOCATION": [
+ "localhost:6379",
+ ],
+ "OPTIONS": {
+ "DB": 1,
+ "PARSER_CLASS": "redis.connection.HiredisParser",
+ "CONNECTION_POOL_CLASS": "redis.BlockingConnectionPool",
+ "CONNECTION_POOL_CLASS_KWARGS": {
+ "max_connections": 50,
+ "timeout": 20,
+ },
+ "MAX_CONNECTIONS": 1000,
+ "PICKLE_VERSION": -1,
+ },
+ },
+}
+
+CELERY_BROKER_URL = "redis://localhost:6379/0"
+
+if TESTING:
+ TELEGRAM_TOKEN = "0000000000:XXXXXXXXXXXXXXXXXXXXXXXXXXXX-XXXXXX"
+ TWILIO_AUTH_TOKEN = "twilio_auth_token"
+
+# TODO: OSS: Add these settings to the oss settings file. Add the version there too.
+OSS_INSTALLATION_FEATURES_ENABLED = True
+
+INSTALLED_APPS += ["apps.oss_installation"] # noqa
+
+CELERY_BEAT_SCHEDULE["send_usage_stats"] = { # noqa
+ "task": "apps.oss_installation.tasks.send_usage_stats_report",
+ "schedule": crontab(hour=0, minute=randrange(0, 59)), # Send stats report at a random minute past midnight # noqa
+ "args": (),
+} # noqa
+
+CELERY_BEAT_SCHEDULE["send_cloud_heartbeat"] = { # noqa
+ "task": "apps.oss_installation.tasks.send_cloud_heartbeat",
+ "schedule": crontab(minute="*/3"), # noqa
+ "args": (),
+} # noqa
+
+SEND_ANONYMOUS_USAGE_STATS = True
diff --git a/engine/settings/base.py b/engine/settings/base.py
new file mode 100644
index 0000000000..b2150a479f
--- /dev/null
+++ b/engine/settings/base.py
@@ -0,0 +1,438 @@
+import os
+from urllib.parse import urljoin
+
+from celery.schedules import crontab
+
+from common.utils import getenv_boolean
+
+VERSION = "dev-oss"
+SEND_ANONYMOUS_USAGE_STATS = False
+
+# License is OpenSource or Cloud
+OPEN_SOURCE_LICENSE_NAME = "OpenSource"
+CLOUD_LICENSE_NAME = "Cloud"
+LICENSE = os.environ.get("ONCALL_LICENSE", default=OPEN_SOURCE_LICENSE_NAME)
+
+DEFAULT_AUTO_FIELD = "django.db.models.BigAutoField"
+
+# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
+BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
+
+# Quick-start development settings - unsuitable for production
+# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
+
+# SECURITY WARNING: keep the secret key used in production secret!
+SECRET_KEY = os.environ.get("SECRET_KEY")
+TOKEN_SECRET = os.environ.get("TOKEN_SECRET", SECRET_KEY)
+# To generate it, use:
+# cat /dev/urandom | base64 | tr -dc '0-9a-zA-Z!@#$%^&*(-_=+)' | head -c75
+TOKEN_SALT = os.environ.get("TOKEN_SALT", "")
+
+# django-mirage-field related settings
+MIRAGE_SECRET_KEY = os.environ.get("MIRAGE_SECRET_KEY")
+MIRAGE_CIPHER_IV = os.environ.get("MIRAGE_CIPHER_IV")
+MIRAGE_CIPHER_MODE = "CBC"
+
+# SECURITY WARNING: don't run with debug turned on in production!
+DEBUG = False
+
+ALLOWED_HOSTS = [item.strip() for item in os.environ.get("ALLOWED_HOSTS", "*").split(",")]
+
+# TODO: update link to up-to-date docs
+DOCS_URL = "https://grafana.com/docs/grafana-cloud/oncall/"
+
+# Settings of running OnCall instance.
+BASE_URL = os.environ.get("BASE_URL") # Root URL of OnCall backend
+
+# Feature toggles
+FEATURE_LIVE_SETTINGS_ENABLED = getenv_boolean("FEATURE_LIVE_SETTINGS_ENABLED", default=True)
+FEATURE_TELEGRAM_INTEGRATION_ENABLED = getenv_boolean("FEATURE_TELEGRAM_INTEGRATION_ENABLED", default=False)
+FEATURE_EMAIL_INTEGRATION_ENABLED = getenv_boolean("FEATURE_EMAIL_INTEGRATION_ENABLED", default=False)
+FEATURE_SLACK_INTEGRATION_ENABLED = getenv_boolean("FEATURE_SLACK_INTEGRATION_ENABLED", default=False)
+OSS_INSTALLATION_FEATURES_ENABLED = False
+
+TWILIO_ACCOUNT_SID = os.environ.get("TWILIO_ACCOUNT_SID")
+TWILIO_AUTH_TOKEN = os.environ.get("TWILIO_AUTH_TOKEN")
+TWILIO_NUMBER = os.environ.get("TWILIO_NUMBER")
+TWILIO_VERIFY_SERVICE_SID = os.environ.get("TWILIO_VERIFY_SERVICE_SID")
+
+TELEGRAM_WEBHOOK_URL = os.environ.get("TELEGRAM_WEBHOOK_URL", urljoin(BASE_URL, "/telegram/"))
+TELEGRAM_TOKEN = os.environ.get("TELEGRAM_TOKEN")
+
+os.environ.setdefault("MYSQL_PASSWORD", "empty")
+os.environ.setdefault("RABBIT_URI", "empty")
+
+# For Sending email
+SENDGRID_API_KEY = os.environ.get("SENDGRID_API_KEY")
+SENDGRID_FROM_EMAIL = os.environ.get("SENDGRID_FROM_EMAIL")
+
+# For Inbound email
+SENDGRID_SECRET_KEY = os.environ.get("SENDGRID_SECRET_KEY")
+SENDGRID_INBOUND_EMAIL_DOMAIN = os.environ.get("SENDGRID_INBOUND_EMAIL_DOMAIN")
+
+# Application definition
+
+INSTALLED_APPS = [
+ "django.contrib.admin",
+ "django.contrib.auth",
+ "django.contrib.contenttypes",
+ "django.contrib.sessions",
+ "django.contrib.messages",
+ "django.contrib.staticfiles",
+ "rest_framework",
+ "django_filters",
+ "ordered_model",
+ "mirage",
+ "engine",
+ "apps.user_management",
+ "apps.alerts",
+ "apps.integrations",
+ "apps.schedules",
+ "apps.heartbeat",
+ "apps.slack",
+ "apps.telegram",
+ "apps.twilioapp",
+ "apps.api",
+ "apps.api_for_grafana_incident",
+ "apps.base",
+ # "apps.sendgridapp", TODO: restore email notifications
+ "apps.auth_token",
+ "apps.public_api",
+ "apps.grafana_plugin",
+ "apps.grafana_plugin_management",
+ "apps.migration_tool",
+ "django_celery_results",
+ "corsheaders",
+ "debug_toolbar",
+ "social_django",
+ "polymorphic",
+ "push_notifications",
+]
+
+REST_FRAMEWORK = {
+ "DEFAULT_PARSER_CLASSES": (
+ "engine.parsers.JSONParser",
+ "engine.parsers.FormParser",
+ "rest_framework.parsers.MultiPartParser",
+ ),
+ "DEFAULT_AUTHENTICATION_CLASSES": [],
+}
+
+MIDDLEWARE = [
+ "log_request_id.middleware.RequestIDMiddleware",
+ "engine.middlewares.RequestTimeLoggingMiddleware",
+ "engine.middlewares.BanAlertConsumptionBasedOnSettingsMiddleware",
+ "engine.middlewares.RequestBodyReadingMiddleware",
+ "django.middleware.security.SecurityMiddleware",
+ "whitenoise.middleware.WhiteNoiseMiddleware",
+ "debug_toolbar.middleware.DebugToolbarMiddleware",
+ "corsheaders.middleware.CorsMiddleware",
+ "django.middleware.common.CommonMiddleware",
+ "django.contrib.sessions.middleware.SessionMiddleware",
+ "django.middleware.common.CommonMiddleware",
+ "django.middleware.csrf.CsrfViewMiddleware",
+ "django.contrib.auth.middleware.AuthenticationMiddleware",
+ "django.contrib.messages.middleware.MessageMiddleware",
+ "django.middleware.clickjacking.XFrameOptionsMiddleware",
+ "social_django.middleware.SocialAuthExceptionMiddleware",
+ "apps.social_auth.middlewares.SocialAuthAuthCanceledExceptionMiddleware",
+]
+
+LOG_REQUEST_ID_HEADER = "HTTP_X_CLOUD_TRACE_CONTEXT"
+
+LOGGING = {
+ "version": 1,
+ "disable_existing_loggers": False,
+ "filters": {"request_id": {"()": "log_request_id.filters.RequestIDFilter"}},
+ "formatters": {
+ "standard": {"format": "source=engine:app google_trace_id=%(request_id)s logger=%(name)s %(message)s"},
+ },
+ "handlers": {
+ "console": {
+ "class": "logging.StreamHandler",
+ "filters": ["request_id"],
+ "formatter": "standard",
+ },
+ },
+ "loggers": {
+ "": {
+ "handlers": ["console"],
+ "level": "INFO",
+ "propagate": True,
+ },
+ },
+}
+
+ROOT_URLCONF = "engine.urls"
+
+TEMPLATES = [
+ {
+ "BACKEND": "django.template.backends.django.DjangoTemplates",
+ "DIRS": [],
+ "APP_DIRS": True,
+ "OPTIONS": {
+ "context_processors": [
+ "django.template.context_processors.debug",
+ "django.template.context_processors.request",
+ "django.contrib.auth.context_processors.auth",
+ "django.contrib.messages.context_processors.messages",
+ ],
+ },
+ },
+]
+
+WSGI_APPLICATION = "engine.wsgi.application"
+
+# Password validation
+# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
+
+AUTH_PASSWORD_VALIDATORS = [
+ {
+ "NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
+ },
+ {
+ "NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
+ },
+ {
+ "NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
+ },
+ {
+ "NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
+ },
+]
+
+# Internationalization
+# https://docs.djangoproject.com/en/2.1/topics/i18n/
+
+LANGUAGE_CODE = "en-us"
+
+TIME_ZONE = "UTC"
+
+USE_I18N = True
+
+USE_L10N = True
+
+USE_TZ = True
+
+# Static files (CSS, JavaScript, Images)
+# https://docs.djangoproject.com/en/2.1/howto/static-files/
+
+STATIC_URL = "/static/"
+STATICFILES_DIRS = [
+ "./static",
+]
+
+CELERY_BROKER_URL = "amqp://rabbitmq:rabbitmq@localhost:5672"
+
+# By default, apply_async will hang indefinitely trying to reach RabbitMQ if RabbitMQ is down.
+# This makes apply_async retry 3 times to reach RabbitMQ, with some control over the periods between retries.
+# https://docs.celeryproject.org/en/stable/userguide/configuration.html#std-setting-broker_transport_options
+# Note that max_retries is not related to task retries; these are RabbitMQ-specific kombu settings.
+CELERY_BROKER_TRANSPORT_OPTIONS = {"max_retries": 3, "interval_start": 0, "interval_step": 0.2, "interval_max": 0.5}
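+# With these options, a failed broker publish is retried after roughly 0s, 0.2s and
+# 0.4s (interval_start + n * interval_step, capped at interval_max) before giving up.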
+
+CELERY_IGNORE_RESULT = True
+CELERY_ACKS_LATE = True
+
+CELERY_TASK_ACKS_LATE = True
+
+CELERY_WORKER_CONCURRENCY = 1
+CELERY_MAX_TASKS_PER_CHILD = 1
+
+CELERY_WORKER_SEND_TASK_EVENTS = True
+CELERY_TASK_SEND_SENT_EVENT = True
+
+CELERY_BEAT_SCHEDULE = {
+ "restore_heartbeat_tasks": {
+ "task": "apps.heartbeat.tasks.restore_heartbeat_tasks",
+ "schedule": 10 * 60,
+ "args": (),
+ },
+ "check_escalations": {
+ "task": "apps.alerts.tasks.check_escalation_finished.check_escalation_finished_task",
+ "schedule": 10 * 60,
+ "args": (),
+ },
+ "start_refresh_ical_files": {
+ "task": "apps.schedules.tasks.refresh_ical_files.start_refresh_ical_files",
+ "schedule": 10 * 60,
+ "args": (),
+ },
+ "start_notify_about_gaps_in_schedule": {
+ "task": "apps.schedules.tasks.notify_about_gaps_in_schedule.start_notify_about_gaps_in_schedule",
+ "schedule": crontab(minute=1, hour=12, day_of_week="monday"),
+ "args": (),
+ },
+ "start_check_gaps_in_schedule": {
+ "task": "apps.schedules.tasks.notify_about_gaps_in_schedule.start_check_gaps_in_schedule",
+ "schedule": crontab(minute=0, hour=0),
+ "args": (),
+ },
+ "start_notify_about_empty_shifts_in_schedule": {
+ "task": "apps.schedules.tasks.notify_about_empty_shifts_in_schedule.start_notify_about_empty_shifts_in_schedule",
+ "schedule": crontab(minute=0, hour=12, day_of_week="monday"),
+ "args": (),
+ },
+ "start_check_empty_shifts_in_schedule": {
+ "task": "apps.schedules.tasks.notify_about_empty_shifts_in_schedule.start_check_empty_shifts_in_schedule",
+ "schedule": crontab(minute=0, hour=0),
+ "args": (),
+ },
+ "populate_slack_usergroups": {
+ "task": "apps.slack.tasks.populate_slack_usergroups",
+ "schedule": crontab(minute=0, hour=9, day_of_week="monday,wednesday,friday"),
+ "args": (),
+ },
+ "populate_slack_channels": {
+ "task": "apps.slack.tasks.populate_slack_channels",
+ "schedule": crontab(minute=0, hour=9, day_of_week="tuesday,thursday"),
+ "args": (),
+ },
+ "check_maintenance_finished": {
+ "task": "apps.alerts.tasks.maintenance.check_maintenance_finished",
+ "schedule": crontab(hour="*", minute=5),
+ "args": (),
+ },
+ "start_sync_organizations": {
+ "task": "apps.grafana_plugin.tasks.sync.start_sync_organizations",
+ "schedule": crontab(minute="*/30"),
+ "args": (),
+ },
+ "process_failed_to_invoke_celery_tasks": {
+ "task": "apps.base.tasks.process_failed_to_invoke_celery_tasks",
+ "schedule": 60 * 10,
+ "args": (),
+ },
+}
+
+INTERNAL_IPS = ["127.0.0.1"]
+
+SELF_IP = os.environ.get("SELF_IP")
+
+SILK_PATH = os.environ.get("SILK_PATH", "silk/")
+SILKY_AUTHENTICATION = True
+SILKY_AUTHORISATION = True
+SILKY_META = True
+SILKY_INTERCEPT_PERCENT = 1
+SILKY_MAX_RECORDED_REQUESTS = 10**4
+
+INSTALLED_APPS += ["silk"]
+# get ONCALL_DJANGO_ADMIN_PATH from env and add trailing / to it
+ONCALL_DJANGO_ADMIN_PATH = os.environ.get("ONCALL_DJANGO_ADMIN_PATH", "django-admin") + "/"
+
+ADMIN_SITE_HEADER = "OnCall Admin Panel"
+
+# Social auth settings
+SOCIAL_AUTH_USER_MODEL = "user_management.User"
+SOCIAL_AUTH_STRATEGY = "apps.social_auth.live_setting_django_strategy.LiveSettingDjangoStrategy"
+
+# https://python-social-auth.readthedocs.io/en/latest/configuration/settings.html
+AUTHENTICATION_BACKENDS = [
+ "apps.social_auth.backends.InstallSlackOAuth2V2",
+ "apps.social_auth.backends.LoginSlackOAuth2V2",
+ "django.contrib.auth.backends.ModelBackend",
+]
+
+SLACK_SIGNING_SECRET = os.environ.get("SLACK_SIGNING_SECRET")
+SLACK_SIGNING_SECRET_LIVE = os.environ.get("SLACK_SIGNING_SECRET_LIVE", "")
+
+SLACK_CLIENT_OAUTH_ID = os.environ.get("SLACK_CLIENT_OAUTH_ID")
+SLACK_CLIENT_OAUTH_SECRET = os.environ.get("SLACK_CLIENT_OAUTH_SECRET")
+
+SLACK_SLASH_COMMAND_NAME = os.environ.get("SLACK_SLASH_COMMAND_NAME", "/oncall")
+
+SOCIAL_AUTH_SLACK_LOGIN_KEY = SLACK_CLIENT_OAUTH_ID
+SOCIAL_AUTH_SLACK_LOGIN_SECRET = SLACK_CLIENT_OAUTH_SECRET
+
+SOCIAL_AUTH_SETTING_NAME_TO_LIVE_SETTING_NAME = {
+ "SOCIAL_AUTH_SLACK_INSTALL_FREE_KEY": "SLACK_CLIENT_OAUTH_ID",
+ "SOCIAL_AUTH_SLACK_INSTALL_FREE_SECRET": "SLACK_CLIENT_OAUTH_SECRET",
+}
+SOCIAL_AUTH_SLACK_INSTALL_FREE_CUSTOM_SCOPE = [
+ "bot",
+ "chat:write:bot",
+ "users:read",
+ "users.profile:read",
+ "commands",
+ "usergroups:read",
+]
+
+SOCIAL_AUTH_PIPELINE = (
+ "apps.social_auth.pipeline.set_user_and_organization_from_request",
+ "social_core.pipeline.social_auth.social_details",
+ "apps.social_auth.pipeline.connect_user_to_slack",
+ "apps.social_auth.pipeline.populate_slack_identities",
+ "apps.social_auth.pipeline.delete_slack_auth_token",
+)
+
+SOCIAL_AUTH_FIELDS_STORED_IN_SESSION = []
+SOCIAL_AUTH_REDIRECT_IS_HTTPS = getenv_boolean("SOCIAL_AUTH_REDIRECT_IS_HTTPS", default=True)
+SOCIAL_AUTH_SLUGIFY_USERNAMES = True
+
+FEATURE_CAPTCHA_ENABLED = getenv_boolean("FEATURE_CAPTCHA_ENABLED", default=False)
+RECAPTCHA_SECRET_KEY = os.environ.get("RECAPTCHA_SECRET_KEY")
+
+PUBLIC_PRIMARY_KEY_MIN_LENGTH = 12
+# Excludes O and 0 to avoid ambiguity. Result: (25 + 9)^12 combinations
+PUBLIC_PRIMARY_KEY_ALLOWED_CHARS = "ABCDEFGHIJKLMNPQRSTUVWXYZ123456789"
+
+AUTH_LINK_TIMEOUT_SECONDS = 300
+SLACK_AUTH_TOKEN_TIMEOUT_SECONDS = 300
+
+SLACK_INSTALL_RETURN_REDIRECT_HOST = os.environ.get("SLACK_INSTALL_RETURN_REDIRECT_HOST", None)
+
+SESSION_COOKIE_DOMAIN = os.environ.get("SESSION_COOKIE_DOMAIN", None)
+SESSION_COOKIE_NAME = "oncall_session"
+
+GRAFANA_COM_API_URL = os.environ.get("GRAFANA_COM_API_URL", "https://grafana.com/api/")
+GRAFANA_COM_USER_AGENT = "Grafana OnCall"
+GRAFANA_COM_API_TOKEN = os.environ.get("GCOM_API_TOKEN", None)
+GRAFANA_COM_ADMIN_API_TOKEN = os.environ.get("GRAFANA_COM_ADMIN_API_TOKEN", None)
+
+GRAFANA_API_KEY_NAME = "Grafana OnCall"
+
+MOBILE_APP_PUSH_NOTIFICATIONS_ENABLED = getenv_boolean("MOBILE_APP_PUSH_NOTIFICATIONS_ENABLED", default=False)
+
+PUSH_NOTIFICATIONS_SETTINGS = {
+ "APNS_AUTH_KEY_PATH": os.environ.get("APNS_AUTH_KEY_PATH", None),
+ "APNS_TOPIC": os.environ.get("APNS_TOPIC", None),
+ "APNS_AUTH_KEY_ID": os.environ.get("APNS_AUTH_KEY_ID", None),
+ "APNS_TEAM_ID": os.environ.get("APNS_TEAM_ID", None),
+ "APNS_USE_SANDBOX": True,
+ "USER_MODEL": "user_management.User",
+}
+
+SELF_HOSTED_SETTINGS = {
+ "STACK_ID": 5,
+ "STACK_SLUG": "self_hosted_stack",
+ "ORG_ID": 100,
+ "ORG_SLUG": "self_hosted_org",
+ "ORG_TITLE": "Self-Hosted Organization",
+}
+
+GRAFANA_CLOUD_ONCALL_API_URL = os.environ.get("GRAFANA_CLOUD_ONCALL_API_URL", "https://a-prod-us-central-0.grafana.net")
+GRAFANA_CLOUD_ONCALL_TOKEN = os.environ.get("GRAFANA_CLOUD_ONCALL_TOKEN", None)
+GRAFANA_CLOUD_ONCALL_HEARTBEAT_ENABLED = getenv_boolean("GRAFANA_CLOUD_ONCALL_HEARTBEAT_ENABLED", default=True)
+
+GRAFANA_INCIDENT_STATIC_API_KEY = os.environ.get("GRAFANA_INCIDENT_STATIC_API_KEY", None)
+
+DATA_UPLOAD_MAX_MEMORY_SIZE = 5242880
+
+# Log inbound/outbound calls as slow=1 if they exceed threshold
+SLOW_THRESHOLD_SECONDS = 2.0
+
+FEATURE_EXTRA_MESSAGING_BACKENDS_ENABLED = getenv_boolean("FEATURE_EXTRA_MESSAGING_BACKENDS_ENABLED", default=False)
+EXTRA_MESSAGING_BACKENDS = []
+
+INSTALLED_ONCALL_INTEGRATIONS = [
+ "apps.integrations.metadata.configuration.alertmanager",
+ "apps.integrations.metadata.configuration.grafana",
+ "apps.integrations.metadata.configuration.grafana_alerting",
+ "apps.integrations.metadata.configuration.formatted_webhook",
+ "apps.integrations.metadata.configuration.webhook",
+ "apps.integrations.metadata.configuration.amazon_sns",
+ "apps.integrations.metadata.configuration.heartbeat",
+ "apps.integrations.metadata.configuration.inbound_email",
+ "apps.integrations.metadata.configuration.maintenance",
+ "apps.integrations.metadata.configuration.manual",
+ "apps.integrations.metadata.configuration.slack_channel",
+]
diff --git a/engine/settings/ci-test.py b/engine/settings/ci-test.py
new file mode 100644
index 0000000000..f3c012a002
--- /dev/null
+++ b/engine/settings/ci-test.py
@@ -0,0 +1,29 @@
+from .base import * # noqa
+
+SECRET_KEY = "u5/IIbuiJR3Y9FQMBActk+btReZ5oOxu+l8MIJQWLfVzESoan5REE6UNSYYEQdjBOcty9CDak2X"
+
+MIRAGE_SECRET_KEY = "V9u7DqZ6SrZHP+SvBT19dbB85NZJGgllpwYQ77BSr9kZ6n8ggXMfGd4sCll1TDcAPEolbVD8YbF"
+MIRAGE_CIPHER_IV = "X+VFcDqtxJ5bbU+V"
+
+BASE_URL = "http://localhost"
+
+CELERY_BROKER_URL = "amqp://rabbitmq:rabbitmq@rabbit_test:5672"
+
+# Primary database must have the name "default"
+DATABASES = {
+ "default": {
+ "ENGINE": "django.db.backends.sqlite3",
+ "NAME": os.path.join(BASE_DIR, "sqlite_data/db.sqlite3"), # noqa
+ },
+}
+
+# Dummy (fake) Telegram token
+TELEGRAM_TOKEN = "0000000000:XXXXXXXXXXXXXXXXXXXXXXXXXXXX-XXXXXX"
+
+SENDGRID_FROM_EMAIL = "dummy_sendgrid_from_email@test.ci-test"
+SENDGRID_SECRET_KEY = "dummy_sendgrid_secret_key"
+TWILIO_ACCOUNT_SID = "dummy_twilio_account_sid"
+TWILIO_AUTH_TOKEN = "dummy_twilio_auth_token"
+
+FEATURE_EXTRA_MESSAGING_BACKENDS_ENABLED = True
+EXTRA_MESSAGING_BACKENDS = ["apps.base.tests.messaging_backend.TestOnlyBackend"]
diff --git a/engine/settings/dev.py b/engine/settings/dev.py
new file mode 100644
index 0000000000..aff8ca9df5
--- /dev/null
+++ b/engine/settings/dev.py
@@ -0,0 +1,92 @@
+import os
+import sys
+
+from .base import * # noqa
+
+SECRET_KEY = os.environ.get("SECRET_KEY", "osMsNM0PqlRHBlUvqmeJ7+ldU3IUETCrY9TrmiViaSmInBHolr1WUlS0OFS4AHrnnkp1vp9S9z1")
+
+MIRAGE_SECRET_KEY = os.environ.get(
+ "MIRAGE_SECRET_KEY", "sIrmyTvh+Go+h/2E46SnYGwgkKyH6IF6MXZb65I40HVCbj0+dD3JvpAqppEwFb7Vxnxlvtey+EL"
+)
+MIRAGE_CIPHER_IV = os.environ.get("MIRAGE_CIPHER_IV", "tZZa+60zTZO2NRcS")
+
+# Primary database must have the name "default"
+DATABASES = {
+ "default": {
+ "ENGINE": "django.db.backends.sqlite3",
+ "NAME": os.path.join(BASE_DIR, "sqlite_data/db.sqlite3"), # noqa
+ },
+}
+
+TESTING = "pytest" in sys.modules or "unittest" in sys.modules
+
+READONLY_DATABASES = {}
+
+# Dictionary union operator ("|"), introduced in Python 3.9
+DATABASES = DATABASES | READONLY_DATABASES
+
+CACHES = {
+ "default": {
+ "BACKEND": "redis_cache.RedisCache",
+ "LOCATION": [
+ "localhost:6379",
+ ],
+ "OPTIONS": {
+ "DB": 1,
+ "PARSER_CLASS": "redis.connection.HiredisParser",
+ "CONNECTION_POOL_CLASS": "redis.BlockingConnectionPool",
+ "CONNECTION_POOL_CLASS_KWARGS": {
+ "max_connections": 50,
+ "timeout": 20,
+ },
+ "MAX_CONNECTIONS": 1000,
+ "PICKLE_VERSION": -1,
+ },
+ },
+}
+
+CELERY_BROKER_URL = "pyamqp://rabbitmq:rabbitmq@localhost:5672"
+
+SILKY_PYTHON_PROFILER = True
+
+# For any requests that come in with that header/value, request.is_secure() will return True.
+SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
+
+# Uncomment this to view SQL queries
+# LOGGING = {
+# 'version': 1,
+# 'filters': {
+# 'require_debug_true': {
+# '()': 'django.utils.log.RequireDebugTrue',
+# }
+# },
+# 'handlers': {
+# 'console': {
+# 'level': 'DEBUG',
+# 'filters': ['require_debug_true'],
+# 'class': 'logging.StreamHandler',
+# }
+# },
+# 'loggers': {
+# 'django.db.backends': {
+# 'level': 'DEBUG',
+# 'handlers': ['console'],
+# }
+# }
+# }
+
+SILKY_INTERCEPT_PERCENT = 100
+
+SWAGGER_SETTINGS = {
+ "SECURITY_DEFINITIONS": {
+ "Basic": {"type": "basic"},
+ "Bearer": {"type": "apiKey", "name": "Authorization", "in": "header"},
+ },
+ "SUPPORTED_SUBMIT_METHODS": ["get", "post", "put", "delete", "options"],
+}
+
+if TESTING:
+ FEATURE_EXTRA_MESSAGING_BACKENDS_ENABLED = True
+ EXTRA_MESSAGING_BACKENDS = ["apps.base.tests.messaging_backend.TestOnlyBackend"]
+ TELEGRAM_TOKEN = "0000000000:XXXXXXXXXXXXXXXXXXXXXXXXXXXX-XXXXXX"
+ TWILIO_AUTH_TOKEN = "twilio_auth_token"
diff --git a/engine/settings/prod_without_db.py b/engine/settings/prod_without_db.py
new file mode 100644
index 0000000000..60b4cc288e
--- /dev/null
+++ b/engine/settings/prod_without_db.py
@@ -0,0 +1,191 @@
+import os
+
+try:
+ import uwsgi
+ from prometheus_client import multiprocess
+
+ def on_uwsgi_worker_exit():
+ multiprocess.mark_process_dead(os.getpid())
+
+ uwsgi.atexit = on_uwsgi_worker_exit
+
+except ModuleNotFoundError:
+ # Only works under uwsgi web server environment
+ pass
+
+from .base import * # noqa
+
+# It's required for collectstatic to avoid connecting it to MySQL
+
+# Primary database must have the name "default"
+DATABASES = {
+ "default": {
+ "ENGINE": "django.db.backends.sqlite3",
+ "NAME": os.path.join(BASE_DIR, "db.sqlite3"), # noqa
+ }
+}
+
+CACHES = {
+ "default": {
+ "BACKEND": "redis_cache.RedisCache",
+ "LOCATION": [
+ os.environ.get("REDIS_URI"),
+ ],
+ "OPTIONS": {
+ "DB": 1,
+ "PARSER_CLASS": "redis.connection.HiredisParser",
+ "CONNECTION_POOL_CLASS": "redis.BlockingConnectionPool",
+ "CONNECTION_POOL_CLASS_KWARGS": {
+ "max_connections": 50,
+ "timeout": 20,
+ },
+ "MAX_CONNECTIONS": 1000,
+ "PICKLE_VERSION": -1,
+ },
+ },
+}
+
+SLACK_SIGNING_SECRET = os.environ.get("SLACK_SIGNING_SECRET")
+SLACK_SIGNING_SECRET_LIVE = os.environ.get("SLACK_SIGNING_SECRET_LIVE", "")
+
+
+STATICFILES_DIRS = [
+ "/etc/app/static",
+]
+STATIC_ROOT = "./collected_static/"
+STATIC_URL = "/static/"
+
+DEBUG = False
+
+CELERY_BROKER_URL = os.environ["RABBIT_URI"]
+
+SECURE_SSL_REDIRECT = True
+SECURE_REDIRECT_EXEMPT = [
+ "^health/",
+ "^health",
+ "^ready/",
+ "^ready",
+ "^startupprobe/",
+ "^startupprobe",
+ "^ready_health_check/",
+ "^ready_health_check",
+ "^live_health_check/",
+ "^live_health_check",
+ "^django-prometheus/metrics",
+ "^django-prometheus/metrics/",
+]
+SECURE_HSTS_SECONDS = 360000
+
+CELERY_TASK_ROUTES = {
+ # DEFAULT
+ "apps.alerts.tasks.call_ack_url.call_ack_url": {"queue": "default"},
+ "apps.alerts.tasks.cache_alert_group_for_web.cache_alert_group_for_web": {"queue": "default"},
+ "apps.alerts.tasks.cache_alert_group_for_web.schedule_cache_for_alert_group": {"queue": "default"},
+ "apps.alerts.tasks.create_contact_points_for_datasource.create_contact_points_for_datasource": {"queue": "default"},
+ "apps.alerts.tasks.sync_grafana_alerting_contact_points.sync_grafana_alerting_contact_points": {"queue": "default"},
+ "apps.alerts.tasks.delete_alert_group.delete_alert_group": {"queue": "default"},
+ "apps.alerts.tasks.invalidate_web_cache_for_alert_group.invalidate_web_cache_for_alert_group": {"queue": "default"},
+ "apps.alerts.tasks.send_alert_group_signal.send_alert_group_signal": {"queue": "default"},
+ "apps.alerts.tasks.wipe.wipe": {"queue": "default"},
+    # TODO: remove cache_alert_group_for_web and schedule_cache_for_alert_group once the existing tasks have been processed
+ "apps.api.tasks.cache_alert_group_for_web": {"queue": "default"},
+ "apps.api.tasks.schedule_cache_for_alert_group": {"queue": "default"},
+ "apps.heartbeat.tasks.heartbeat_checkup": {"queue": "default"},
+ "apps.heartbeat.tasks.integration_heartbeat_checkup": {"queue": "default"},
+ "apps.heartbeat.tasks.process_heartbeat_task": {"queue": "default"},
+ "apps.heartbeat.tasks.restore_heartbeat_tasks": {"queue": "default"},
+ "apps.schedules.tasks.refresh_ical_files.refresh_ical_file": {"queue": "default"},
+ "apps.schedules.tasks.refresh_ical_files.start_refresh_ical_files": {"queue": "default"},
+ "apps.schedules.tasks.notify_about_gaps_in_schedule.check_empty_shifts_in_schedule": {"queue": "default"},
+ "apps.schedules.tasks.notify_about_gaps_in_schedule.notify_about_empty_shifts_in_schedule": {"queue": "default"},
+ "apps.schedules.tasks.notify_about_gaps_in_schedule.start_check_empty_shifts_in_schedule": {"queue": "default"},
+ "apps.schedules.tasks.notify_about_gaps_in_schedule.start_notify_about_empty_shifts_in_schedule": {
+ "queue": "default"
+ },
+ "apps.schedules.tasks.notify_about_empty_shifts_in_schedule.check_empty_shifts_in_schedule": {"queue": "default"},
+ "apps.schedules.tasks.notify_about_empty_shifts_in_schedule.notify_about_empty_shifts_in_schedule": {
+ "queue": "default"
+ },
+ "apps.schedules.tasks.notify_about_empty_shifts_in_schedule.start_check_empty_shifts_in_schedule": {
+ "queue": "default"
+ },
+ "apps.schedules.tasks.notify_about_empty_shifts_in_schedule.start_notify_about_empty_shifts_in_schedule": {
+ "queue": "default"
+ },
+ "engine.views.health_check_task": {"queue": "default"},
+ # CRITICAL
+ "apps.alerts.tasks.acknowledge_reminder.acknowledge_reminder_task": {"queue": "critical"},
+ "apps.alerts.tasks.acknowledge_reminder.unacknowledge_timeout_task": {"queue": "critical"},
+ "apps.alerts.tasks.distribute_alert.distribute_alert": {"queue": "critical"},
+ "apps.alerts.tasks.distribute_alert.send_alert_create_signal": {"queue": "critical"},
+ "apps.alerts.tasks.escalate_alert_group.escalate_alert_group": {"queue": "critical"},
+ "apps.alerts.tasks.invite_user_to_join_incident.invite_user_to_join_incident": {"queue": "critical"},
+ "apps.alerts.tasks.maintenance.check_maintenance_finished": {"queue": "critical"},
+ "apps.alerts.tasks.maintenance.disable_maintenance": {"queue": "critical"},
+ "apps.alerts.tasks.notify_all.notify_all_task": {"queue": "critical"},
+ "apps.alerts.tasks.notify_group.notify_group_task": {"queue": "critical"},
+ "apps.alerts.tasks.notify_ical_schedule_shift.notify_ical_schedule_shift": {"queue": "critical"},
+ "apps.alerts.tasks.notify_user.notify_user_task": {"queue": "critical"},
+ "apps.alerts.tasks.notify_user.perform_notification": {"queue": "critical"},
+ "apps.alerts.tasks.notify_user.send_user_notification_signal": {"queue": "critical"},
+ "apps.alerts.tasks.resolve_alert_group_by_source_if_needed.resolve_alert_group_by_source_if_needed": {
+ "queue": "critical"
+ },
+ "apps.alerts.tasks.resolve_by_last_step.resolve_by_last_step_task": {"queue": "critical"},
+ "apps.alerts.tasks.send_update_log_report_signal.send_update_log_report_signal": {"queue": "critical"},
+ "apps.alerts.tasks.send_update_postmortem_signal.send_update_postmortem_signal": {"queue": "critical"},
+ "apps.alerts.tasks.send_update_resolution_note_signal.send_update_resolution_note_signal": {"queue": "critical"},
+ "apps.alerts.tasks.unsilence.unsilence_task": {"queue": "critical"},
+ "apps.base.tasks.process_failed_to_invoke_celery_tasks": {"queue": "critical"},
+ "apps.base.tasks.process_failed_to_invoke_celery_tasks_batch": {"queue": "critical"},
+ "apps.integrations.tasks.create_alert": {"queue": "critical"},
+ "apps.integrations.tasks.create_alertmanager_alerts": {"queue": "critical"},
+ "apps.integrations.tasks.start_notify_about_integration_ratelimit": {"queue": "critical"},
+ "apps.schedules.tasks.drop_cached_ical.drop_cached_ical_for_custom_events_for_organization": {"queue": "critical"},
+ "apps.schedules.tasks.drop_cached_ical.drop_cached_ical_task": {"queue": "critical"},
+ # LONG
+ "apps.alerts.tasks.check_escalation_finished.check_escalation_finished_task": {"queue": "long"},
+ "apps.grafana_plugin.tasks.sync.start_sync_organizations": {"queue": "long"},
+ "apps.grafana_plugin.tasks.sync.sync_organization_async": {"queue": "long"},
+ # SLACK
+ "apps.integrations.tasks.notify_about_integration_ratelimit_in_slack": {"queue": "slack"},
+ "apps.slack.helpers.alert_group_representative.on_alert_group_action_triggered_async": {"queue": "slack"},
+ "apps.slack.helpers.alert_group_representative.on_alert_group_update_log_report_async": {"queue": "slack"},
+ "apps.slack.helpers.alert_group_representative.on_create_alert_slack_representative_async": {"queue": "slack"},
+ "apps.slack.tasks.check_slack_message_exists_before_post_message_to_thread": {"queue": "slack"},
+ "apps.slack.tasks.clean_slack_integration_leftovers": {"queue": "slack"},
+ "apps.slack.tasks.populate_slack_channels": {"queue": "slack"},
+ "apps.slack.tasks.populate_slack_channels_for_team": {"queue": "slack"},
+ "apps.slack.tasks.populate_slack_user_identities": {"queue": "slack"},
+ "apps.slack.tasks.populate_slack_usergroups": {"queue": "slack"},
+ "apps.slack.tasks.populate_slack_usergroups_for_team": {"queue": "slack"},
+ "apps.slack.tasks.post_or_update_log_report_message_task": {"queue": "slack"},
+ "apps.slack.tasks.post_slack_rate_limit_message": {"queue": "slack"},
+ "apps.slack.tasks.refresh_slack_user_identity_emails": {"queue": "slack"},
+ "apps.slack.tasks.resolve_archived_incidents_for_organization": {"queue": "slack"},
+ "apps.slack.tasks.send_debug_message_to_thread": {"queue": "slack"},
+ "apps.slack.tasks.send_message_to_thread_if_bot_not_in_channel": {"queue": "slack"},
+ "apps.slack.tasks.start_update_slack_user_group_for_schedules": {"queue": "slack"},
+ "apps.slack.tasks.unarchive_incidents_for_organization": {"queue": "slack"},
+ "apps.slack.tasks.unpopulate_slack_user_identities": {"queue": "slack"},
+ "apps.slack.tasks.update_incident_slack_message": {"queue": "slack"},
+ "apps.slack.tasks.update_slack_user_group_for_schedules": {"queue": "slack"},
+ # TELEGRAM
+ "apps.telegram.tasks.edit_message": {"queue": "telegram"},
+ "apps.telegram.tasks.on_create_alert_telegram_representative_async": {"queue": "telegram"},
+ "apps.telegram.tasks.register_telegram_webhook": {"queue": "telegram"},
+ "apps.telegram.tasks.send_link_to_channel_message_or_fallback_to_full_incident": {"queue": "telegram"},
+ "apps.telegram.tasks.send_log_and_actions_message": {"queue": "telegram"},
+ # WEBHOOK
+ "apps.alerts.tasks.custom_button_result.custom_button_result": {"queue": "webhook"},
+}
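+# Workers can then be pinned to one or more of these queues (sketch; the Celery
+# app name "engine" is assumed):
+#   celery -A engine worker -Q critical
+#   celery -A engine worker -Q slack,telegram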
+
+REST_FRAMEWORK = {
+ "DEFAULT_PARSER_CLASSES": (
+ "rest_framework.parsers.JSONParser",
+ "rest_framework.parsers.FormParser",
+ "rest_framework.parsers.MultiPartParser",
+ ),
+ "DEFAULT_AUTHENTICATION_CLASSES": [],
+ "DEFAULT_RENDERER_CLASSES": ("rest_framework.renderers.JSONRenderer",),
+}
diff --git a/engine/static/images/heartbeat_instructions/heartbeat_grafana_1.png b/engine/static/images/heartbeat_instructions/heartbeat_grafana_1.png
new file mode 100644
index 0000000000000000000000000000000000000000..d78c3a0e0272a537438bc866f04d002894302a5e
GIT binary patch
literal 181505