diff --git a/unskript-ctl/config/unskript_ctl_config.yaml b/unskript-ctl/config/unskript_ctl_config.yaml
index 2415e6562..f36acf823 100644
--- a/unskript-ctl/config/unskript_ctl_config.yaml
+++ b/unskript-ctl/config/unskript_ctl_config.yaml
@@ -33,6 +33,8 @@ checks:
arguments:
global:
region: us-west-2
+ matrix:
+ namespace: [n1, n2]
# Credential section
#
@@ -147,6 +149,7 @@ notification:
Email:
verbose: true #Not yet supported
enable: false
+ email_subject_line: ""
# provider for the email. Possible values:
# - SMTP - SMTP server
# - SES - AWS SES
@@ -168,6 +171,7 @@ notification:
api_key: ""
to-email: ""
from-email: ""
+
#
# Job section
#
@@ -176,6 +180,7 @@ jobs:
- name: "" # Unique name
# The results of the job to be notified or not.
notify: true
+ #notify_sink: foo
enable: false
# Specific checks to run
# Not supported: multiple checks, only single check support for now.
@@ -220,7 +225,6 @@ jobs:
# - opsgenie
connector_types: []
# Custom scripts to be run.
- # Not supported
custom_scripts: []
#
diff --git a/unskript-ctl/unskript_ctl.py b/unskript-ctl/unskript_ctl.py
index 7d58b0cbf..cbdfef746 100755
--- a/unskript-ctl/unskript_ctl.py
+++ b/unskript-ctl/unskript_ctl.py
@@ -34,7 +34,7 @@
from unskript.legos.utils import CheckOutputStatus
from unskript_ctl_gen_report import *
from unskript_ctl_debug import *
-from unskript_ctl_show import *
+from unskript_ctl_show import *
from unskript_ctl_list import *
from ZODB import DB
from unskript_utils import *
@@ -81,6 +81,11 @@ def load_or_create_global_configuration():
if config_yaml.get('checks'):
if config_yaml.get('checks').get('arguments'):
if config_yaml.get('checks').get('arguments').get('global'):
+ # Handle matrix
+ matrix = config_yaml.get('checks').get('arguments').get('global').get('matrix')
+ if matrix is not None:
+ UNSKRIPT_GLOBALS['matrix'] = matrix
+ UNSKRIPT_GLOBALS['uuid_mapping'] = {}
UNSKRIPT_GLOBALS['global'] = config_yaml.get('checks').get('arguments').get('global')
for k, v in config_yaml.get('checks').get('arguments').get('global').items():
os.environ[k] = json.dumps(v)
@@ -128,10 +133,26 @@ def insert_first_and_last_cell(nb: nbformat.NotebookNode) -> nbformat.NotebookNo
'''
if UNSKRIPT_GLOBALS.get('global') and len(UNSKRIPT_GLOBALS.get('global')):
for k,v in UNSKRIPT_GLOBALS.get('global').items():
+ # skip matrix
+ if k == "matrix":
+ continue
if isinstance(v,str) is True:
first_cell_content += f'{k} = \"{v}\"' + '\n'
else:
first_cell_content += f'{k} = {v}' + '\n'
+ # Handle matrix arguments. Basically for matrix arguments, here is the
+ # logic:
+ # - Define n variables, n being the length of the matrix argument, e.g.
+ # namespace0, namespace1, etc., each with a different value.
+ # - Duplicate the cells which use matrix argument and use each variable
+ # in that cell.
+ # - Next, since the cells are duplicated, combine the output from the
+ # duplicated cells to show a uniform view.
+ if UNSKRIPT_GLOBALS.get('matrix'):
+ for k, v in UNSKRIPT_GLOBALS.get('matrix').items():
+ if v is not None:
+ for index, value in enumerate(v):
+ first_cell_content += f'{k}{index} = \"{value}\"' + '\n'
first_cell_content += f'''
w = Workflow(env, secret_store_cfg, None, global_vars=globals(), check_uuids={ids})'''
@@ -204,9 +225,10 @@ def run_ipynb(filename: str, status_list_of_dict: list = None, filter: str = Non
# We store the Status of runbook execution in status_dict
status_dict = {}
status_dict['runbook'] = filename
+ output_file = filename.replace('.ipynb', '_output.ipynb')
status_dict['result'] = []
r_name = '\x1B[1;20;42m' + "Executing Runbook -> " + \
- filename.strip() + '\x1B[0m'
+ output_file.strip() + '\x1B[0m'
print(r_name)
@@ -233,6 +255,7 @@ def run_ipynb(filename: str, status_list_of_dict: list = None, filter: str = Non
if len(outputs) == 0:
print("ERROR: Output of the cell execution is empty. Is the credential configured?")
+ # TBD: why do we do this? The output in the last cell has the check uuid; we should use that instead
ids = get_code_cell_action_uuids(nb.dict())
result_table = [["Checks Name", "Result", "Failed Count", "Error"]]
if UNSKRIPT_GLOBALS.get('skipped'):
@@ -260,11 +283,13 @@ def run_ipynb(filename: str, status_list_of_dict: list = None, filter: str = Non
if ids:
for output in outputs:
r = output.get('text')
- for result in r.split('\n'):
- if result == '':
+ r = output_after_merging_checks(r.split('\n'), ids)
+ #print(f'new_output {r}, len_r {len(r)}, ids {ids}, len_ids {len(ids)}')
+ for result in r:
+ if result.get('skip') and result.get('skip') is True:
+ idx += 1
continue
- payload = json.loads(result)
-
+ payload = result
try:
if ids and CheckOutputStatus(payload.get('status')) == CheckOutputStatus.SUCCESS:
result_table.append([
@@ -330,6 +355,64 @@ def run_ipynb(filename: str, status_list_of_dict: list = None, filter: str = Non
status_list_of_dict.append(status_dict)
+def output_after_merging_checks(outputs: list, ids: list) -> list:
+ """output_after_merging_checks: this function combines the output from duplicated
+ checks and stores the combined output.
+ TBD: What if one duplicated check returns an ERROR?
+ """
+ new_outputs = []
+ # Remove empty strings
+ filtered_output = []
+ for output in outputs:
+ if not output:
+ continue
+ payload = json.loads(output)
+ filtered_output.append(payload)
+
+ outputs = filtered_output
+ if UNSKRIPT_GLOBALS.get('uuid_mapping') is None:
+ return outputs
+
+ index = 0
+ while index < len(outputs):
+ if UNSKRIPT_GLOBALS['uuid_mapping'].get(ids[index]) is None:
+ new_outputs.append(outputs[index])
+ index = index+1
+ else:
+ parent_index = index - 1
+ while index < len(outputs):
+ if UNSKRIPT_GLOBALS['uuid_mapping'].get(ids[index]):
+ outputs[index]['skip'] = True
+ new_outputs.append(outputs[index])
+ index = index + 1
+ else:
+ break
+ combined_output = calculate_combined_check_status(outputs[parent_index:index])
+ # Combined output should be the output of the parent check, so
+ # overwrite it.
+ #print(f'parent_index {parent_index}, index {index}, combined_output {combined_output}')
+ new_outputs[parent_index] = combined_output
+ return new_outputs
+
+def calculate_combined_check_status(outputs:list):
+ combined_output = {}
+ status = CheckOutputStatus.SUCCESS
+ failed_objects = []
+ error = None
+ for output in outputs:
+ if CheckOutputStatus(output.get('status')) == CheckOutputStatus.FAILED:
+ status = CheckOutputStatus.FAILED
+ failed_objects.append(output.get('objects'))
+ elif CheckOutputStatus(output.get('status')) == CheckOutputStatus.RUN_EXCEPTION:
+ status = CheckOutputStatus.RUN_EXCEPTION
+ error = output.get('error')
+
+ combined_output['status'] = status
+ combined_output['objects'] = failed_objects
+ combined_output['error'] = error
+ return combined_output
+
+
def run_checks():
"""run_checks This function takes the filter as an argument
and based on the filter, this function queries the DB and
@@ -372,7 +455,7 @@ def run_checks():
action="store_true",
required=False,
help='Report check runs')
-
+
args = parser.parse_args(sys.argv[1:])
if len(sys.argv) == 2:
@@ -444,7 +527,10 @@ def run_checks():
if args.report:
UNSKRIPT_GLOBALS['report'] = True
+ # If there is a matrix argument, we need to replicate the check which has
+ # that argument, that many times.
if len(check_list) > 0:
+ check_list = create_checks_for_matrix_argument(check_list)
runbooks.append(create_jit_runbook(check_list))
status_of_runs = []
@@ -469,6 +555,64 @@ def run_checks():
# if UNSKRIPT_GLOBALS.get('report') is True:
# send_notification(status_of_runs, UNSKRIPT_GLOBALS.get('failed_result'))
+def create_checks_for_matrix_argument(checks: list):
+ """create_checks_for_matrix_argument: This function duplicates every check that
+ uses a matrix argument, once per value of that argument, and records the
+ uuid mapping from each duplicate back to its parent check.
+ """
+ checks_list = []
+ for check in checks:
+ input_schema = check.get('inputschema')
+ if input_schema is None:
+ checks_list.append(check)
+ continue
+ add_check_to_list = True
+ if UNSKRIPT_GLOBALS.get('global') and len(UNSKRIPT_GLOBALS.get('global')):
+# input_json_start_line = '''
+#task.configure(inputParamsJson=\'\'\'{
+# '''
+# input_json_end_line = '''}\'\'\')
+# '''
+ input_json_line = ''
+ try:
+ schema = input_schema[0]
+ if schema.get('properties'):
+ for key in schema.get('properties').keys():
+ # Check if the property is a matrix argument.
+ # If that's the case, replicate the check as many
+ # times as there are entries in that argument.
+ duplicate_count = 1
+ if UNSKRIPT_GLOBALS.get('matrix'):
+ matrix_value = UNSKRIPT_GLOBALS.get('matrix').get(key)
+ if matrix_value is not None:
+ duplicate_count += len(matrix_value)
+ # Duplicate this check len(matrix_argument) times.
+ # Also, for each check, you need to use a different
+ # argument, so store that in a field named
+ # matrixinputline
+ is_first = True
+ for dup in range(duplicate_count-1):
+ add_check_to_list = False
+ input_json_line = ''
+ input_json_line += f"\"{key}\": \"{key}{dup}\" ,"
+ newcheck = check.copy()
+ if is_first is False:
+ # Maintain the uuid mapping so this uuid is linked
+ # to the one it is copied from.
+ new_uuid = str(uuid.uuid4())
+ UNSKRIPT_GLOBALS["uuid_mapping"][new_uuid] = check["uuid"]
+ newcheck['uuid'] = new_uuid
+ newcheck['id'] = str(uuid.uuid4())[:8]
+ #print(f'Adding duplicate check {new_uuid}, parent_uuid {check.get("uuid")}')
+ newcheck['matrixinputline'] = input_json_line.rstrip(',')
+ checks_list.append(newcheck)
+ is_first = False
+ except Exception as e:
+ print(f"EXCEPTION {e}")
+ pass
+ if add_check_to_list:
+ checks_list.append(check)
+ return checks_list
def run_suites(suite_name: str):
"""run_suites This function takes the suite_name as an argument
@@ -559,8 +703,8 @@ def print_run_summary(status_list_of_dict):
print(tabulate(summary_table, headers='firstrow', tablefmt='fancy_grid'))
-
-def replace_input_with_globals(inputSchema: str):
+def replace_input_with_globals(check):
+ inputSchema = check.get('inputschema')
if not inputSchema:
return None
retval = ''
@@ -580,7 +724,10 @@ def replace_input_with_globals(inputSchema: str):
except Exception as e:
print(f"EXCEPTION {e}")
pass
-
+ # Handle the matrix argument related line
+ matrix_argument_line = check.get('matrixinputline')
+ if matrix_argument_line:
+ input_json_line += matrix_argument_line
retval = input_json_start_line + input_json_line.rstrip(',') + '\n' + input_json_end_line
return retval
@@ -600,6 +747,7 @@ def create_jit_runbook(check_list: list):
:rtype: None
"""
nb = nbformat.v4.new_notebook()
+ unknown_attrs = ['description', 'uuid', 'name', 'type', 'inputschema', 'version', 'orderProperties', 'tags', 'language', 'matrixinputline']
exec_id = str(uuid.uuid4())
UNSKRIPT_GLOBALS['exec_id'] = exec_id
@@ -628,7 +776,7 @@ def create_jit_runbook(check_list: list):
\"credential_name\":''' + f" \"{cred_name}\"" + ''',
\"credential_type\":''' + f" \"{s_connector}\"" + '''}\'\'\')
'''
- input_json = replace_input_with_globals(check.get('inputschema'))
+ input_json = replace_input_with_globals(check)
if input_json:
task_lines += input_json
try:
@@ -649,12 +797,13 @@ def create_jit_runbook(check_list: list):
check['metadata']['name'] = check['name']
cc = nbformat.v4.new_code_cell(check.get('code'))
for k, v in check.items():
- if k != 'code':
+ if k != 'code' and k not in unknown_attrs:
cc[k] = check[k]
nb['cells'].append(cc)
except Exception as e:
- raise e
+ print(f'{bcolors.FAIL} Runbook creation failed, code {c}, {e}')
+ sys.exit(0)
# The Recent Version of Docker, the unskript-ctl spews a lot of errors like this:
#
# ERROR:traitlets:Notebook JSON is invalid: Additional properties are not allowed ('orderProperties', 'description',
@@ -664,14 +813,13 @@ def create_jit_runbook(check_list: list):
# This is because the nbformat.write() complains about unknown attributes that are present
# in the IPYNB file. We dont need these attributes when we run the notebook via the Command Line.
# So we surgically eliminate these keys from the NB dictionary.
- unknown_attrs = ['description', 'uuid', 'name', 'type', 'inputschema', 'version', 'orderProperties', 'tags', 'language']
for cell in nb.get('cells'):
if cell.get('cell_type') == "code":
# This check is needed because the ID value is by default saved
# as integer in our code-snippets to enable drag-and-drop
cell['id'] = str(cell.get('id'))
- for attr in unknown_attrs:
- del cell[attr]
+# for attr in unknown_attrs:
+# del cell[attr]
nbformat.write(nb, failed_notebook)
@@ -724,7 +872,7 @@ def update_audit_trail(status_dict_list: list):
trail_data[id]['check_status'][check_id]['status'] = status
trail_data[id]['check_status'][check_id]['connector'] = connector
if UNSKRIPT_GLOBALS.get('failed_result'):
- c_name = connector + ':' + check_name
+ c_name = connector + ':' + check_name
for name, obj in UNSKRIPT_GLOBALS.get('failed_result').items():
if name in (c_name, check_name):
trail_data[id]['check_status'][check_id]['failed_objects'] = obj
@@ -878,7 +1026,7 @@ def get_connector_name_from_id(action_uuid: str, content: dict) -> str:
def create_creds_mapping():
- """create_creds_mapping This function creates a mapping based on the Credential TYPE,
+ """create_creds_mapping This function creates a mapping based on the Credential TYPE,
the mapping would be a list of dictionaries with {"name", "id"}
:rtype: None
@@ -1419,7 +1567,7 @@ def run_main():
parser.add_argument('--report',
help="Report results",
action='store_true')
-
+
args,additional_args = parser.parse_known_args()
if len(additional_args) > 0:
@@ -1427,7 +1575,7 @@ def run_main():
if len(sys.argv) <= 2:
parser.print_help()
sys.exit(1)
-
+
if args.check not in ('', None):
run_checks()
@@ -1452,7 +1600,7 @@ def run_main():
output_json_file = None
if args.script:
output_json_file = output_dir + '/' + UNSKRIPT_SCRIPT_RUN_OUTPUT_FILE_NAME + '.json'
-
+
send_notification(summary_result_table=summary_result,
failed_result=failed_objects,
output_metadata_file=output_json_file)
@@ -1487,26 +1635,26 @@ def run_main():
type=str,
nargs=REMAINDER,
help="Run Options")
-
+
parser.add_argument('-s',
'--show',
dest="show_option",
type=str,
nargs=REMAINDER,
help='Show Options')
-
+
parser.add_argument('-d',
'--debug',
dest="debug_option",
type=str,
nargs=REMAINDER,
help='Debug Options')
-
+
parser.add_argument('--create-credential',
type=str,
nargs=REMAINDER,
help='Create Credential [-creds-type creds_file_path]')
-
+
args = parser.parse_args()
if len(sys.argv) <= 2:
@@ -1518,7 +1666,7 @@ def run_main():
elif args.run_option not in ('', None):
run_main()
elif args.show_option not in ('', None):
- show_main()
+ show_main()
elif args.debug_option not in ('', None):
debug_session_main()
elif args.create_credential not in ('', None):
diff --git a/unskript-ctl/unskript_ctl_config_parser.py b/unskript-ctl/unskript_ctl_config_parser.py
index 78c1342f5..ea324c57f 100644
--- a/unskript-ctl/unskript_ctl_config_parser.py
+++ b/unskript-ctl/unskript_ctl_config_parser.py
@@ -23,7 +23,7 @@
#)
UNSKRIPT_CTL_CONFIG_FILE="/etc/unskript/unskript_ctl_config.yaml"
UNSKRIPT_CTL_BINARY="/usr/local/bin/unskript-ctl.sh"
-UNSKRIPT_EXECUTION_DIR="/unskript/data/execution/"
+
# Job config related
JOB_CONFIG_CHECKS_KEY_NAME = "checks"
@@ -39,7 +39,18 @@
GLOBAL_CONFIG_AUDIT_PERIOD_KEY_NAME = "audit_period"
GLOBAL_DEFAULT_AUDIT_PERIOD = 90
+# Checks section related
+CHECKS_ARGUMENTS_KEY_NAME = "arguments"
+CHECKS_GLOBAL_KEY_NAME = "global"
+CHECKS_MATRIX_KEY_NAME = "matrix"
+# Config top level keys
+CONFIG_GLOBAL = "global"
+CONFIG_CHECKS = "checks"
+CONFIG_CREDENTIAL = "credential"
+CONFIG_NOTIFICATION = "notification"
+CONFIG_JOBS = "jobs"
+CONFIG_SCHEDULER = "scheduler"
class Job():
def __init__(
@@ -127,7 +138,7 @@ def parse_global(self):
print('###################################')
print(f'{bcolors.HEADER}Processing global section{bcolors.ENDC}')
print('###################################')
- config = self.parsed_config.get('global')
+ config = self.parsed_config.get(CONFIG_GLOBAL)
if config is None:
print(f"{bcolors.WARNING}Global: Nothing to configure credential with, found empty creds data{bcolors.ENDC}")
return
@@ -137,6 +148,31 @@ def parse_global(self):
print(f'Global: audit period {audit_period} days')
self.audit_period = audit_period
+ def parse_checks(self):
+ """parse_checks: This function parses the checks section of the config.
+ """
+ print('###################################')
+ print(f'{bcolors.HEADER}Processing checks section{bcolors.ENDC}')
+ print('###################################')
+ config = self.parsed_config.get(CONFIG_CHECKS)
+ if config is None:
+ print(f"{bcolors.WARNING}Checks: No checks config{bcolors.ENDC}")
+ return
+ arguments = config.get(CHECKS_ARGUMENTS_KEY_NAME)
+ if arguments is None:
+ print(f"{bcolors.WARNING}Checks: No arguments config{bcolors.ENDC}")
+ return
+ global_args = arguments.get(CHECKS_GLOBAL_KEY_NAME)
+ if global_args is None:
+ print(f"{bcolors.WARNING}Checks: No global config{bcolors.ENDC}")
+ return
+ # Ensure we have at most ONE matrix argument
+ matrix_args = global_args.get(CHECKS_MATRIX_KEY_NAME)
+ if matrix_args is None:
+ return
+ if len(matrix_args) > 1:
+ print(f'{bcolors.FAIL} Only one matrix argument supported {bcolors.ENDC}')
+ return
def configure_credential(self):
"""configure_credential: This function is used to parse through the creds_dict and
@@ -145,7 +181,7 @@ def configure_credential(self):
print('###################################')
print(f'{bcolors.HEADER}Processing credential section{bcolors.ENDC}')
print('###################################')
- creds_dict = self.parsed_config.get('credential')
+ creds_dict = self.parsed_config.get(CONFIG_CREDENTIAL)
if creds_dict is None:
print(f"{bcolors.WARNING}Credential: Nothing to configure credential with, found empty creds data{bcolors.ENDC}")
return
@@ -185,7 +221,7 @@ def configure_schedule(self):
print('###################################')
print(f'{bcolors.HEADER}Processing scheduler section{bcolors.ENDC}')
print('###################################')
- config = self.parsed_config.get('scheduler')
+ config = self.parsed_config.get(CONFIG_SCHEDULER)
if config is None:
print(f"{bcolors.WARNING}Scheduler: No scheduler configuration found{bcolors.ENDC}")
return
@@ -248,7 +284,7 @@ def parse_jobs(self):
print('###################################')
print(f'{bcolors.HEADER}Processing jobs section{bcolors.ENDC}')
print('###################################')
- config = self.parsed_config.get('jobs')
+ config = self.parsed_config.get(CONFIG_JOBS)
if config is None:
print(f'{bcolors.WARNING}Jobs: No jobs config found{bcolors.ENDC}')
return
@@ -299,6 +335,7 @@ def main():
config_parser.parse_config_yaml()
config_parser.parse_global()
+ config_parser.parse_checks()
config_parser.configure_credential()
config_parser.parse_jobs()
config_parser.configure_schedule()
diff --git a/unskript-ctl/unskript_ctl_gen_report.py b/unskript-ctl/unskript_ctl_gen_report.py
index e1d5f30f5..84f696a65 100644
--- a/unskript-ctl/unskript_ctl_gen_report.py
+++ b/unskript-ctl/unskript_ctl_gen_report.py
@@ -76,8 +76,8 @@ def unskript_ctl_config_read_notification(n_type: str):
print(f"No Notification found for {n_type}")
return {}
-def send_notification(summary_result_table: list,
- failed_result: dict,
+def send_notification(summary_result_table: list,
+ failed_result: dict,
output_metadata_file: str = None):
"""send_notification: This function is called by unskript-ctl or
unctl to send notification of any given result. The requirement is that
@@ -179,6 +179,7 @@ def send_email_notification(summary_results: list,
if not creds_data:
print("ERROR: Mail Notification setting is empty. Cannot send Mail out")
+ subject = creds_data.get('email_subject_line', 'Run Result')
if creds_data.get('provider').lower() == "smtp":
c_data = creds_data.get('SMTP')
retval = send_smtp_notification(summary_results,
@@ -188,7 +189,8 @@ def send_email_notification(summary_results: list,
c_data.get('smtp-user'),
c_data.get('smtp-password'),
c_data.get('to-email'),
- c_data.get('from-email'))
+ c_data.get('from-email'),
+ subject)
elif creds_data.get('provider').lower() == "sendgrid":
c_data = creds_data.get('Sendgrid')
retval = send_sendgrid_notification(summary_results,
@@ -196,7 +198,8 @@ def send_email_notification(summary_results: list,
output_metadata_file,
c_data.get('from-email'),
c_data.get('to-email'),
- c_data.get('api_key'))
+ c_data.get('api_key'),
+ subject)
elif creds_data.get('provider').lower() == "ses":
c_data = creds_data.get('SES')
retval = prepare_to_send_awsses_notification(summary_results,
@@ -206,7 +209,8 @@ def send_email_notification(summary_results: list,
c_data.get('secret_access'),
c_data.get('to-email'),
c_data.get('from-email'),
- c_data.get('region'))
+ c_data.get('region'),
+ subject)
else:
print(f"ERROR: Unknown notification service {creds_data.get('provider')}")
@@ -219,21 +223,22 @@ def prepare_to_send_awsses_notification(summary_results: list,
secret_key: str,
to_email: str,
from_email: str,
- region: str):
+ region: str,
+ subject: str):
if not access_key or not secret_key:
print("ERROR: Cannot send AWS SES Notification without access and/or secret_key")
return False
-
+
# WE CAN TAKE A TITLE AS WELL, IF WE WANT CUSTOM TITLE IN THE REPORT
attachment_ = MIMEMultipart('mixed')
- attachment_['Subject'] = 'unSkript ctl Run Result'
-
+ attachment_['Subject'] = subject
+
attachment_ = prepare_combined_email(summary_results=summary_results,
failed_result=failed_result,
output_metadata_file=output_metadata_file,
title=None,
attachment=attachment_)
-
+
return do_send_awsses_email(from_email=from_email,
to_email=to_email,
attachment_=attachment_,
@@ -296,7 +301,8 @@ def send_sendgrid_notification(summary_results: list,
output_metadata_file: str,
from_email: str,
to_email: str,
- api_key: str):
+ api_key: str,
+ subject: str):
# Dynamic Load (Import) necessary libraries for sendgrid
import sendgrid
from sendgrid import SendGridAPIClient
@@ -305,21 +311,21 @@ def send_sendgrid_notification(summary_results: list,
if not from_email or not to_email or not api_key:
print("ERROR: From Email, To Email and API Key are mandatory parameters to send email notification")
return False
-
+
html_message = ''
- email_subject = 'unSkript-ctl Check Run result'
+ email_subject = subject
parent_folder = UNSKRIPT_GLOBALS.get('CURRENT_EXECUTION_RUN_DIRECTORY')
target_name = os.path.basename(parent_folder)
tar_file_name = f"{target_name}" + '.tar.bz2'
target_file_name = None
metadata = None
- try:
+ try:
# We can have custom Title here
html_message += create_email_header(title=None)
if summary_results and len(summary_results):
html_message += create_checks_summary_message(summary_results=summary_results,
failed_result=failed_result)
- create_temp_files_of_failed_results(failed_result=failed_result)
+ create_temp_files_of_failed_results(failed_result=failed_result)
if output_metadata_file:
html_message += create_script_summary_message(output_metadata_file=output_metadata_file)
with open(output_metadata_file, 'r') as f:
@@ -333,23 +339,23 @@ def send_sendgrid_notification(summary_results: list,
output_metadata_file = output_metadata_file.split('/')[-1]
if create_tarball_archive(tar_file_name=tar_file_name,
output_metadata_file=output_metadata_file,
- parent_folder=parent_folder) is False:
+ parent_folder=parent_folder) is False:
raise ValueError("ERROR: Archiving attachments failed!")
target_file_name = tar_file_name
else:
if create_tarball_archive(tar_file_name=tar_file_name,
output_metadata_file=None,
- parent_folder=parent_folder) is False:
+ parent_folder=parent_folder) is False:
raise ValueError("ERROR: Archiving attachments failed!")
target_file_name = tar_file_name
-
+
email_message = Mail(
from_email=from_email,
to_emails=to_email,
subject=email_subject,
html_content=html_message
)
- if target_file_name:
+ if target_file_name:
email_message = sendgrid_add_email_attachment(email_message=email_message,
file_to_attach=target_file_name,
compress=True)
@@ -365,10 +371,10 @@ def send_sendgrid_notification(summary_results: list,
except Exception as e:
print(f"ERROR: Unable to send notification as email. {e}")
return False
-
+
return True
-def sendgrid_add_email_attachment(email_message,
+def sendgrid_add_email_attachment(email_message,
file_to_attach: str,
compress: bool = True):
from sendgrid.helpers.mail import Attachment, FileContent, FileName, FileType
@@ -386,7 +392,7 @@ def sendgrid_add_email_attachment(email_message,
attachment.file_type = FileType('application/text')
attachment.disposition = 'attachment'
email_message.add_attachment(attachment)
-
+
return email_message
@@ -413,7 +419,7 @@ def create_checks_summary_message(summary_results: list,
message = ''
if not summary_results or not failed_result:
return message
-
+
if len(summary_results):
p = f = e = 0
tr_message = ''
@@ -464,7 +470,7 @@ def create_script_summary_message(output_metadata_file: str):
if not metadata:
print(f'ERROR: Metadata is empty for the script. Please check content of {output_metadata_file}')
raise ValueError("Metadata is empty")
-
+
message += f'''