Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

OOTB integration dashboards initiative script #18128

Draft
Draft pull request: wants to merge 12 commits into base branch `master`.
276 changes: 276 additions & 0 deletions .ddev/integrations-scripts/heuristics.csv

Large diffs are not rendered by default.

1 change: 1 addition & 0 deletions .ddev/integrations-scripts/heuristics.json

Large diffs are not rendered by default.

120 changes: 120 additions & 0 deletions .ddev/integrations-scripts/integration_dashboards.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,120 @@
import os
from datetime import datetime, date
from utilities import ONE_YEAR, PATH, INTEGRATIONS, RESULTS_TXT_FILE, RESULTS_JSON_FILE, STATS_TXT_FILE

# all directories that include an 'assets/dashboards' directory
def get_dashboard_directories(top_path):
    """Return the names of integration directories under *top_path* that
    contain an ``assets/dashboards`` directory.

    The original triple-nested ``os.listdir`` crashed with
    NotADirectoryError when ``assets`` was a plain file, and accepted a
    ``dashboards`` *file*; a single isdir() check on the joined path
    handles both cases.
    """
    integrations = []
    for name in os.listdir(top_path):
        if os.path.isdir(os.path.join(top_path, name, "assets", "dashboards")):
            integrations.append(name)
    return integrations

def is_dashboard_outdated(modified_date, days_outdated):
    """Return True if *modified_date* (a ``YYYY-MM-DD`` string) is more than
    *days_outdated* days before now.

    Uses strptime instead of manually splitting on '-', which also
    validates the date string instead of silently accepting garbage.
    """
    modified = datetime.strptime(modified_date, '%Y-%m-%d')
    return (datetime.now() - modified).days > days_outdated

# store all last modified date, name, and pathname in a file
def store_bash_calls_in_text_file(top_path, dirs, file_name):
    """Append one '&'-delimited record per dashboard file to *file_name*:

        <last-commit-date>&<author-email>&<author-name>&<relative-path>

    The date/email/name prefix comes from ``git log -n 1`` on each
    dashboard file. Replaces the original ``os.system`` shell strings
    (unquoted paths, shell-injection prone) with ``subprocess.run`` using
    an argument list and a Python-side append, preserving the exact
    output format (git's format string emits no trailing newline, so the
    '&<path>' line completes each record).
    """
    import subprocess  # local import: keeps the module's import block untouched

    with open(file_name, 'a') as out:
        for integration in dirs:
            dashboards_path = integration + "/assets/dashboards"
            full_dashboards_path = os.path.join(top_path, dashboards_path)
            for dashboard in os.listdir(full_dashboards_path):
                full_path = os.path.join(full_dashboards_path, dashboard)
                result = subprocess.run(
                    ['git', 'log', '-n', '1',
                     '--pretty=format:%cd%ae&%an',
                     '--date=format:%Y-%m-%d&',
                     full_path],
                    capture_output=True, text=True)
                out.write(result.stdout)
                out.write('&' + os.path.join(dashboards_path, dashboard) + '\n')



def get_sorted_dict_by_modified_date(results_file):
    """Parse the '&'-delimited results file and return a list of dicts
    (keys: last_modified, email, name, path) sorted by last_modified
    ascending.

    Each line has the shape ``date&email&name&path``. Uses a context
    manager (the original leaked the handle on exceptions) and skips
    blank lines, which previously raised IndexError. Avoids shadowing
    the ``dict`` builtin.
    """
    entries = []
    with open(results_file, 'r') as fh:
        for line in fh:
            if not line.strip():
                continue
            attrs = line.split('&')
            entries.append({
                'last_modified': attrs[0],
                'email': attrs[1],
                'name': attrs[2],
                'path': attrs[3].strip(),
            })

    # list.sort mutates in place
    entries.sort(key=lambda e: datetime.strptime(e['last_modified'], '%Y-%m-%d'))
    return entries

def store_sorted_dict_in_json_file(sorted_entries, file_name):
    """Write *sorted_entries* (list of dicts) to *file_name* as JSON.

    The original ``str(...).replace("'", '"')`` produced invalid JSON
    whenever a value contained an apostrophe (e.g. author name
    "O'Brien"); json.dump quotes and escapes correctly.
    """
    import json  # local import: keeps the module's import block untouched

    with open(file_name, 'w') as fh:
        json.dump(sorted_entries, fh)

def get_dashboard_stats(results_file):
    """Count dashboards in the '&'-delimited results file.

    Returns ``[total, outdated_2_years, outdated_1_year]`` (order matters:
    store_dashboard_stats_in_text_file indexes this list).

    Uses a context manager and plain line iteration instead of the
    manual readline loop; only the leading date field is needed, so the
    split is capped at one.
    """
    total = 0
    outdated_two_years = 0
    outdated_one_year = 0

    with open(results_file, 'r') as fh:
        for line in fh:
            modified_date = line.split('&', 1)[0]
            total += 1
            if is_dashboard_outdated(modified_date, ONE_YEAR):
                outdated_one_year += 1
            if is_dashboard_outdated(modified_date, ONE_YEAR * 2):
                outdated_two_years += 1

    return [total, outdated_two_years, outdated_one_year]


def store_dashboard_stats_in_text_file(stats, file_name):
    """Write a human-readable staleness summary to *file_name*.

    *stats* is ``[total, outdated_2_years, outdated_1_year]`` as returned
    by get_dashboard_stats. When there are no dashboards only the dated
    header line is written (avoids division by zero). Uses a context
    manager so the handle is closed even on write errors.
    """
    with open(file_name, 'w') as fh:
        fh.write('Results as of ' + str(date.today()) + '\n')
        if stats[0] > 0:
            percent_outdated_1_year = (stats[2] / stats[0]) * 100
            percent_outdated_2_years = (stats[1] / stats[0]) * 100
            # include actual numbers
            fh.write('Total 1 year: ' + str(stats[2]) + '\n')
            fh.write('Total 2 years: ' + str(stats[1]) + '\n')
            fh.write('Total: ' + str(stats[0]) + '\n')
            fh.write('Dashboards outdated 1 year: ' + str(percent_outdated_1_year) + '%\n')
            fh.write('Dashboards outdated 2 years: ' + str(percent_outdated_2_years) + '%')

def main():
    """End-to-end run: collect git metadata for every integration
    dashboard, persist the sorted results as JSON, then write the
    staleness summary."""
    store_bash_calls_in_text_file(PATH, INTEGRATIONS, RESULTS_TXT_FILE)
    sorted_entries = get_sorted_dict_by_modified_date(RESULTS_TXT_FILE)
    store_sorted_dict_in_json_file(sorted_entries, RESULTS_JSON_FILE)
    store_dashboard_stats_in_text_file(
        get_dashboard_stats(RESULTS_TXT_FILE), STATS_TXT_FILE
    )


if __name__ == "__main__":
    main()
176 changes: 176 additions & 0 deletions .ddev/integrations-scripts/parse_dashboard_jsons.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,176 @@
import json
import csv
import os
from utilities import PATH, HEURISTICS_JSON_FILE, HEURISTICS_CSV_FILE, RESULTS_TXT_FILE, DASHBOARD_NAMES_CSV_FILE
from integration_dashboards import get_sorted_dict_by_modified_date

def evaluate_widgets(widgets):
    """Tally legend and query-value heuristics over a list of widgets.

    Returns a dict of counters:
        legend.total / legend.show_legend
        query_values.total / .have_timeseries_background / .have_conditional_formats

    Fixes vs. the original:
      * an empty widget list returned ``None`` (the caller subscripts the
        result, which raised TypeError) — now returns zeroed counters;
      * ``definition['type']`` raised KeyError when 'type' was absent —
        now uses ``.get``;
      * an empty ``requests`` list raised IndexError — now guarded.
    """
    counts = {
        'legend': {
            'total': 0,
            'show_legend': 0,
        },
        'query_values': {
            'total': 0,
            'have_timeseries_background': 0,
            'have_conditional_formats': 0,
        },
    }

    for widget in widgets:
        definition = widget['definition']

        # legend heuristics: only widgets that expose the flag are counted
        if 'show_legend' in definition:
            counts['legend']['total'] += 1
            if definition['show_legend'] == True:
                counts['legend']['show_legend'] += 1

        # query-value heuristics
        if definition.get('type') == 'query_value':
            counts['query_values']['total'] += 1

            if 'timeseries_background' in definition:
                counts['query_values']['have_timeseries_background'] += 1

            requests = definition.get('requests')
            if requests and requests[0].get('conditional_formats'):
                counts['query_values']['have_conditional_formats'] += 1

    return counts


def parse_json_definitions(result_obj, id_map):
    """Evaluate one dashboard JSON file against the layout heuristics.

    *result_obj* is one entry from get_sorted_dict_by_modified_date
    (keys: path, last_modified, email, ...); *id_map* maps dashboard
    titles to ``{'id', 'short_name'}``. Returns a flat dict of heuristic
    results suitable for the JSON/CSV writers.

    Fixes vs. the original:
      * ``definition[def_key] != ('group' or 'note')`` — the parenthesized
        expression evaluates to just ``'group'``, so note widgets were
        wrongly flagged as ungrouped; compare against the tuple instead;
      * the dashboard file handle was never closed — use ``with``;
      * titles missing from *id_map* and groups without a 'title' key
        raised KeyError — now guarded;
      * empty nested widget lists made evaluate_widgets return None and
        crash the counter updates — now skipped.
    """
    with open(os.path.join(PATH, result_obj['path']), 'r') as fh:
        json_object = json.load(fh)

    # default values; booleans are stored as strings for the CSV output
    heuristics = {
        'path': result_obj['path'],
        'last_modified': result_obj['last_modified'],
        'email': result_obj['email'],
        'title': '',
        'id': '',
        'short_name': '',
        'has_ordered_layout': 'True',
        'has_ungrouped_widgets': 'False',
        'has_all_title_case_groups': "True",
        'has_overview_section': 'False',
        'about_section_contains_text': 'False',
        'about_section_contains_banner_img': 'False',
        'total_query_values': 0,
        'query_values_have_timeseries_background': 0,
        'query_values_have_conditional_formats': 0,
        'total_widgets_legends': 0,
        'widgets_legends_shown': 0,
    }

    if 'layout_type' in json_object:
        heuristics['has_ordered_layout'] = str(json_object['layout_type'] != 'free')

    if 'title' in json_object:
        title_string = str(json_object['title'])
        heuristics['title'] = title_string
        # TODO: titles containing a comma must be mapped manually since
        # the CSV file would be affected by the comma
        if title_string.find(',') == -1 and title_string in id_map:
            heuristics['id'] = id_map[title_string]['id']
            heuristics['short_name'] = id_map[title_string]['short_name']

    is_first = True
    for widget in json_object.get('widgets', []):
        definition = widget["definition"]
        widget_type = definition.get('type')

        # any top-level widget that is neither a group nor a note
        if widget_type is not None and widget_type not in ('group', 'note'):
            heuristics['has_ungrouped_widgets'] = str(True)

        if widget_type == 'group':
            group_title = str(definition.get('title', '')).strip()

            # has title case groups
            if len(group_title) > 1 and not group_title.istitle():
                heuristics['has_all_title_case_groups'] = str(False)

            # overview group
            if group_title.lower().find('overview') != -1:
                heuristics['has_overview_section'] = str(True)

            # first group is expected to be the "About" section
            # NOTE(review): the flattened original made the exact scope of
            # is_first ambiguous; this assumes it tracks the first group.
            if is_first:
                if group_title.lower().find('about') != -1:
                    heuristics['about_section_contains_text'] = str(True)
                if definition.get('banner_img') is not None:
                    heuristics['about_section_contains_banner_img'] = str(True)
            is_first = False

        # tally nested widgets (skip empty lists: evaluate_widgets has
        # nothing to count for them)
        inner_widgets = definition.get('widgets')
        if inner_widgets:
            evaluated = evaluate_widgets(inner_widgets)
            heuristics['total_query_values'] += evaluated['query_values']['total']
            heuristics['query_values_have_timeseries_background'] += evaluated['query_values']['have_timeseries_background']
            heuristics['query_values_have_conditional_formats'] += evaluated['query_values']['have_conditional_formats']
            heuristics['total_widgets_legends'] += evaluated['legend']['total']
            heuristics['widgets_legends_shown'] += evaluated['legend']['show_legend']

    return heuristics

def store_heuristics_in_json_file(heuristics_arr, file):
    """Serialize the heuristics list to *file* as JSON.

    json.dump quotes and escapes correctly; the original
    ``str(...).replace("'", '"')`` produced invalid JSON whenever a
    title or name contained an apostrophe.
    """
    with open(file, 'w') as fh:
        json.dump(heuristics_arr, fh)

def store_heuristics_in_csv_file(heuristics_arr, file):
    """Write the heuristics dicts to *file* as CSV; the header row is the
    first dict's keys (all dicts share the same shape — see
    parse_json_definitions defaults).

    Iterates the dicts directly instead of the original round-trip through
    ``json.loads(str(...).replace("'", '"'))``, which crashed on any value
    containing an apostrophe. ``newline=''`` is required by the csv module
    to avoid blank rows on Windows.
    """
    with open(file, 'w', newline='') as fh:
        csv_writer = csv.writer(fh)
        for index, dashboard in enumerate(heuristics_arr):
            if index == 0:
                # headers
                csv_writer.writerow(dashboard.keys())
            # data
            csv_writer.writerow(dashboard.values())

# creates a mapping of dashboard names to ID and short name from the provided CSV file
def get_dashboard_names_map(file):
    """Return ``{title: {'id': ..., 'short_name': ...}}`` from a CSV whose
    columns are ``id, short_name, title`` (in that order).

    Uses a context manager — the original never closed the handle — and
    ``newline=''`` as the csv module requires.
    """
    mapping = {}
    with open(file, 'r', newline='') as fh:
        for row in csv.reader(fh):
            mapping[row[2]] = {'id': row[0], 'short_name': row[1]}
    return mapping


def main():
    """Build the per-dashboard heuristics report: parse every dashboard
    listed in the results file, then persist the findings as JSON and CSV."""
    id_map = get_dashboard_names_map(DASHBOARD_NAMES_CSV_FILE)
    heuristics_arr = [
        parse_json_definitions(entry, id_map)
        for entry in get_sorted_dict_by_modified_date(RESULTS_TXT_FILE)
    ]

    store_heuristics_in_json_file(heuristics_arr, HEURISTICS_JSON_FILE)
    store_heuristics_in_csv_file(heuristics_arr, HEURISTICS_CSV_FILE)


if __name__ == "__main__":
    main()
Loading
Loading