diff --git a/accountstats/views.py b/accountstats/views.py
index 0399274..3997caa 100644
--- a/accountstats/views.py
+++ b/accountstats/views.py
@@ -46,7 +46,7 @@ def account(request, account):
 
 @login_required
 @account_or_staff
-@parse_start_end(default_start=datetime.now() - timedelta(days=30))
+@parse_start_end(default_start=datetime.now() - timedelta(days=30), minimum=prom.rate('slurm-job-exporter'))
 def graph_application(request, account):
     data = []
     query_alloc = 'slurm_job:process_usage:sum_account{{account="{}", {}}}'.format(account, prom.get_filter())
@@ -70,7 +70,11 @@ def graph_application(request, account):
 
 def graph(request, query, stacked=True, unit=None):
     data = []
-    stats_alloc = prom.query_prometheus_multiple(query, request.start, request.end, step=request.step)
+    stats_alloc = prom.query_prometheus_multiple(
+        query,
+        request.start,
+        end=request.end,
+        step=request.step)
     for line in stats_alloc:
         user = line['metric']['user']
         x = list(map(lambda x: x.strftime('%Y-%m-%d %H:%M:%S'), line['x']))
@@ -94,7 +98,7 @@ def graph(request, query, stacked=True, unit=None):
 
 @login_required
 @account_or_staff
-@parse_start_end(default_start=datetime.now() - timedelta(days=30))
+@parse_start_end(default_start=datetime.now() - timedelta(days=30), minimum=prom.rate('slurm-job-exporter'))
 def graph_cpu_allocated(request, account):
     query_alloc = 'sum(slurm_job:allocated_core:count_user_account{{account="{}", {}}}) by (user)'.format(account, prom.get_filter())
     return graph(request, query_alloc, unit=_('cores'))
@@ -102,7 +106,7 @@ def graph_cpu_allocated(request, account):
 
 @login_required
 @account_or_staff
-@parse_start_end(default_start=datetime.now() - timedelta(days=30))
+@parse_start_end(default_start=datetime.now() - timedelta(days=30), minimum=prom.rate('slurm-job-exporter'))
 def graph_cpu_used(request, account):
     query_used = 'sum(slurm_job:used_core:sum_user_account{{account="{}", {}}}) by (user)'.format(account, prom.get_filter())
     return graph(request, query_used, unit=_('cores'))
@@ -110,7 +114,7 @@ def graph_cpu_used(request, account):
 
 @login_required
 @account_or_staff
-@parse_start_end(default_start=datetime.now() - timedelta(days=30))
+@parse_start_end(default_start=datetime.now() - timedelta(days=30), minimum=prom.rate('slurm-job-exporter'))
 def graph_cpu_wasted(request, account):
     query_alloc = 'clamp_min(sum(slurm_job:allocated_core:count_user_account{{account="{}", {}}}) by (user) - sum(slurm_job:used_core:sum_user_account{{account="{}", {}}}) by (user), 0)'.format(account, prom.get_filter(), account, prom.get_filter())
     return graph(request, query_alloc, stacked=False, unit=_('cores'))
@@ -118,7 +122,7 @@ def graph_cpu_wasted(request, account):
 
 @login_required
 @account_or_staff
-@parse_start_end(default_start=datetime.now() - timedelta(days=30))
+@parse_start_end(default_start=datetime.now() - timedelta(days=30), minimum=prom.rate('slurm-job-exporter'))
 def graph_mem_allocated(request, account):
     query_alloc = 'sum(slurm_job:allocated_memory:sum_user_account{{account="{}", {}}}) by (user) /(1024*1024*1024)'.format(account, prom.get_filter())
     return graph(request, query_alloc, unit=_('GiB'))
@@ -126,7 +130,7 @@ def graph_mem_allocated(request, account):
 
 @login_required
 @account_or_staff
-@parse_start_end(default_start=datetime.now() - timedelta(days=30))
+@parse_start_end(default_start=datetime.now() - timedelta(days=30), minimum=prom.rate('slurm-job-exporter'))
 def graph_mem_used(request, account):
     query_used = 'sum(slurm_job:rss_memory:sum_user_account{{account="{}", {}}}) by (user) /(1024*1024*1024)'.format(account, prom.get_filter())
     return graph(request, query_used, unit=_('GiB'))
@@ -134,7 +138,7 @@ def graph_mem_used(request, account):
 
 @login_required
 @account_or_staff
-@parse_start_end(default_start=datetime.now() - timedelta(days=30))
+@parse_start_end(default_start=datetime.now() - timedelta(days=30), minimum=prom.rate('slurm-job-exporter'))
 def graph_mem_wasted(request, account):
     query_alloc = 'clamp_min(sum(slurm_job:allocated_memory:sum_user_account{{account="{}", {}}}) by (user) - sum(slurm_job:rss_memory:sum_user_account{{account="{}", {}}}) by (user), 0) /(1024*1024*1024)'.format(account, prom.get_filter(), account, prom.get_filter())
     return graph(request, query_alloc, stacked=False, unit=_('GiB'))
@@ -142,7 +146,7 @@ def graph_mem_wasted(request, account):
 
 @login_required
 @account_or_staff
-@parse_start_end(default_start=datetime.now() - timedelta(hours=6))
+@parse_start_end(default_start=datetime.now() - timedelta(hours=6), minimum=prom.rate('lustre_exporter'))
 def graph_lustre_mdt(request, account):
     query = 'sum(rate(lustre_job_stats_total{{component=~"mdt",account=~"{}", {}}}[5m])) by (user, fs) !=0'.format(account, prom.get_filter())
     return graph(request, query, stacked=False, unit=_('IOPS'))
@@ -150,11 +154,16 @@ def graph_lustre_mdt(request, account):
 
 @login_required
 @account_or_staff
+@parse_start_end(default_start=datetime.now() - timedelta(hours=6), minimum=prom.rate('lustre_exporter'))
 def graph_lustre_ost(request, account):
     data = []
     for i in ['read', 'write']:
         query = '(sum(rate(lustre_job_{}_bytes_total{{component=~"ost",account=~"{}",target=~".*-OST.*", {}}}[5m])) by (user, fs)) / (1024*1024)'.format(i, account, prom.get_filter())
-        stats = prom.query_prometheus_multiple(query, datetime.now() - timedelta(hours=6), datetime.now())
+        stats = prom.query_prometheus_multiple(
+            query,
+            request.start,
+            request.end,
+            step=request.step)
 
         for line in stats:
             fs = line['metric']['fs']
@@ -183,7 +192,7 @@ def graph_lustre_ost(request, account):
 
 @login_required
 @account_or_staff
-@parse_start_end(default_start=datetime.now() - timedelta(days=30))
+@parse_start_end(default_start=datetime.now() - timedelta(days=30), minimum=prom.rate('slurm-job-exporter'))
 def graph_gpu_allocated(request, account):
     query = 'sum(slurm_job:allocated_gpu:count_user_account{{account="{}", {}}}) by (user)'.format(account, prom.get_filter())
     return graph(request, query, unit=_('GPUs'))
@@ -191,7 +200,7 @@ def graph_gpu_allocated(request, account):
 
 @login_required
 @account_or_staff
-@parse_start_end(default_start=datetime.now() - timedelta(days=30))
+@parse_start_end(default_start=datetime.now() - timedelta(days=30), minimum=prom.rate('slurm-job-exporter'))
 def graph_gpu_used(request, account):
     query = 'sum(slurm_job:used_gpu:sum_user_account{{account="{}", {}}}) by (user)'.format(account, prom.get_filter())
     return graph(request, query, unit=_('GPUs'))
@@ -199,7 +208,7 @@ def graph_gpu_used(request, account):
 
 @login_required
 @account_or_staff
-@parse_start_end(default_start=datetime.now() - timedelta(days=30))
+@parse_start_end(default_start=datetime.now() - timedelta(days=30), minimum=prom.rate('slurm-job-exporter'))
 def graph_gpu_wasted(request, account):
     query = 'sum(slurm_job:allocated_gpu:count_user_account{{account="{}", {}}}) by (user) - sum(slurm_job:used_gpu:sum_user_account{{account="{}", {}}}) by (user)'.format(account, prom.get_filter(), account, prom.get_filter())
     return graph(request, query, stacked=False, unit=_('GPUs'))
@@ -207,7 +216,7 @@ def graph_gpu_wasted(request, account):
 
 @login_required
 @account_or_staff
-@parse_start_end(default_start=datetime.now() - timedelta(days=30))
+@parse_start_end(default_start=datetime.now() - timedelta(days=30), minimum=prom.rate('slurm-job-exporter'))
 # kinda broken when using multiple GPUs
 def graph_gpu_power_allocated(request, account):
     query = 'count(slurm_job_power_gpu{{account="{}", {}}}) by (user) * 300'.format(account, prom.get_filter())
@@ -216,7 +225,7 @@ def graph_gpu_power_allocated(request, account):
 
 @login_required
 @account_or_staff
-@parse_start_end(default_start=datetime.now() - timedelta(days=30))
+@parse_start_end(default_start=datetime.now() - timedelta(days=30), minimum=prom.rate('slurm-job-exporter'))
 # kinda broken when using multiple GPUs
 def graph_gpu_power_used(request, account):
     query = 'sum(slurm_job_power_gpu{{account="{}", {}}}) by (user) / 1000'.format(account, prom.get_filter())
@@ -225,7 +234,7 @@ def graph_gpu_power_used(request, account):
 
 @login_required
 @account_or_staff
-@parse_start_end(default_start=datetime.now() - timedelta(days=30))
+@parse_start_end(default_start=datetime.now() - timedelta(days=30), minimum=prom.rate('slurm-job-exporter'))
 # kinda broken when using multiple GPUs
 def graph_gpu_power_wasted(request, account):
     query = '(count(slurm_job_power_gpu{{account="{}", {}}}) by (user) * 300) - (sum(slurm_job_power_gpu{{account="{}", {}}}) by (user) / 1000)'.format(account, prom.get_filter(), account, prom.get_filter())
@@ -245,7 +254,7 @@ def graph_gpu_priority(request, account):
 
 
 # auth done in functions above
-@parse_start_end(default_start=datetime.now() - timedelta(days=90))
+@parse_start_end(default_start=datetime.now() - timedelta(days=90), minimum=prom.rate('slurm-job-exporter'))
 def graph_cpu_or_gpu_priority(request, account, gpu_or_cpu):
     data = []
     if gpu_or_cpu == 'gpu':
diff --git a/cloudstats/views.py b/cloudstats/views.py
index 2f2b2dc..7fbac91 100644
--- a/cloudstats/views.py
+++ b/cloudstats/views.py
@@ -1,6 +1,6 @@
 from django.shortcuts import render
 from django.http import JsonResponse
-from userportal.common import openstackproject_or_staff, cloud_projects_by_user, request_to_username, staff, Prometheus, query_time
+from userportal.common import openstackproject_or_staff, cloud_projects_by_user, request_to_username, staff, Prometheus, parse_start_end
 from userportal.common import anonymize as a
 from django.conf import settings
 from datetime import datetime, timedelta
@@ -63,13 +63,17 @@ def instance(request, project, uuid):
 
 @login_required
 @openstackproject_or_staff
+@parse_start_end(minimum=prom.rate('libvirtd_exporter'))
 def project_graph_cpu(request, project):
     data = []
-    timing = query_time(request)
     query_used = 'sum(rate(libvirtd_domain_vcpu_time{{project_name="{project}", {filter}}}[1m])) by (uuid,instance_name) / 1000000000'.format(
         project=project,
         filter=prom.get_filter('cloudstats'))
-    stats_used = prom.query_prometheus_multiple(query_used, timing[0], step=timing[1])
+    stats_used = prom.query_prometheus_multiple(
+        query_used,
+        request.start,
+        end=request.end,
+        step=request.step)
 
     # Only show UUID if required
     instance_names = []
@@ -97,7 +101,11 @@ def project_graph_cpu(request, project):
     query_running = 'sum(count(libvirtd_domain_vcpu_time{{project_name="{project}", {filter}}}))'.format(
         project=project,
         filter=prom.get_filter('cloudstats'))
-    stats_running = prom.query_prometheus_multiple(query_running, timing[0], step=timing[1])
+    stats_running = prom.query_prometheus_multiple(
+        query_running,
+        request.start,
+        end=request.end,
+        step=request.step)
     for line in stats_running:
         x = list(map(lambda x: x.strftime('%Y-%m-%d %H:%M:%S'), line['x']))
         y = line['y']
@@ -120,12 +128,16 @@ def project_graph_cpu(request, project):
 
 @login_required
 @staff
+@parse_start_end(minimum=prom.rate('libvirtd_exporter'))
 def projects_graph_cpu(request):
     data = []
-    timing = query_time(request)
     query_used = 'sum(rate(libvirtd_domain_vcpu_time{{ {filter} }}[5m])) by (project_name) / 1000000000'.format(
         filter=prom.get_filter('cloudstats'))
-    stats_used = prom.query_prometheus_multiple(query_used, timing[0], step=timing[1])
+    stats_used = prom.query_prometheus_multiple(
+        query_used,
+        request.start,
+        end=request.end,
+        step=request.step)
     for line in stats_used:
         data.append({
             'x': list(map(lambda x: x.strftime('%Y-%m-%d %H:%M:%S'), line['x'])),
@@ -138,7 +150,11 @@ def projects_graph_cpu(request):
 
     query_running = 'sum(count(libvirtd_domain_vcpu_time{{ {filter} }})) by (project_name)'.format(
         filter=prom.get_filter('cloudstats'))
-    stats_running = prom.query_prometheus_multiple(query_running, timing[0], step=timing[1])
+    stats_running = prom.query_prometheus_multiple(
+        query_running,
+        request.start,
+        end=request.end,
+        step=request.step)
     for line in stats_running:
         data.append({
             'x': list(map(lambda x: x.strftime('%Y-%m-%d %H:%M:%S'), line['x'])),
@@ -158,14 +174,18 @@ def projects_graph_cpu(request):
 
 @login_required
 @openstackproject_or_staff
+@parse_start_end(minimum=prom.rate('libvirtd_exporter'))
 def instance_graph_cpu(request, project, uuid):
     data = []
-    timing = query_time(request)
     query_used = 'rate(libvirtd_domain_vcpu_time{{project_name="{project}", uuid="{uuid}", {filter}}}[1m]) / 1000000000'.format(
         project=project,
         uuid=uuid,
         filter=prom.get_filter('cloudstats'))
-    stats_used = prom.query_prometheus_multiple(query_used, timing[0], step=timing[1])
+    stats_used = prom.query_prometheus_multiple(
+        query_used,
+        request.start,
+        end=request.end,
+        step=request.step)
 
     for line in stats_used:
         x = list(map(lambda x: x.strftime('%Y-%m-%d %H:%M:%S'), line['x']))
@@ -184,7 +204,11 @@ def instance_graph_cpu(request, project, uuid):
         project=project,
         uuid=uuid,
         filter=prom.get_filter('cloudstats'))
-    stats_running = prom.query_prometheus_multiple(query_running, timing[0], step=timing[1])
+    stats_running = prom.query_prometheus_multiple(
+        query_running,
+        request.start,
+        end=request.end,
+        step=request.step)
     for line in stats_running:
         x = list(map(lambda x: x.strftime('%Y-%m-%d %H:%M:%S'), line['x']))
         y = line['y']
@@ -207,13 +231,17 @@ def instance_graph_cpu(request, project, uuid):
 
 @login_required
 @openstackproject_or_staff
+@parse_start_end(minimum=prom.rate('libvirtd_exporter'))
 def project_graph_memory(request, project):
     data = []
-    timing = query_time(request)
     query_used = '(libvirtd_domain_balloon_current{{project_name="{project}", {filter}}} - libvirtd_domain_balloon_usable{{project_name="{project}", {filter}}})/1024/1024'.format(
         project=project,
         filter=prom.get_filter('cloudstats'))
-    stats_used = prom.query_prometheus_multiple(query_used, timing[0], step=timing[1])
+    stats_used = prom.query_prometheus_multiple(
+        query_used,
+        request.start,
+        end=request.end,
+        step=request.step)
 
     # Only show UUID if required
     instance_names = []
@@ -240,7 +268,11 @@ def project_graph_memory(request, project):
     query_running = 'sum(libvirtd_domain_balloon_current{{project_name="{project}", {filter}}})/1024/1024'.format(
         project=project,
         filter=prom.get_filter('cloudstats'))
-    stats_running = prom.query_prometheus_multiple(query_running, timing[0], step=timing[1])
+    stats_running = prom.query_prometheus_multiple(
+        query_running,
+        request.start,
+        end=request.end,
+        step=request.step)
     for line in stats_running:
         x = list(map(lambda x: x.strftime('%Y-%m-%d %H:%M:%S'), line['x']))
         y = line['y']
@@ -264,14 +296,18 @@ def project_graph_memory(request, project):
 
 @login_required
 @openstackproject_or_staff
+@parse_start_end(minimum=prom.rate('libvirtd_exporter'))
 def instance_graph_memory(request, project, uuid):
     data = []
-    timing = query_time(request)
     query_used = '(libvirtd_domain_balloon_current{{project_name="{project}", uuid="{uuid}", {filter}}} - libvirtd_domain_balloon_usable{{project_name="{project}", uuid="{uuid}", {filter}}})/1024/1024'.format(
         project=project,
         uuid=uuid,
         filter=prom.get_filter('cloudstats'))
-    stats_used = prom.query_prometheus_multiple(query_used, timing[0], step=timing[1])
+    stats_used = prom.query_prometheus_multiple(
+        query_used,
+        request.start,
+        end=request.end,
+        step=request.step)
 
     for line in stats_used:
         x = list(map(lambda x: x.strftime('%Y-%m-%d %H:%M:%S'), line['x']))
@@ -288,7 +324,11 @@ def instance_graph_memory(request, project, uuid):
         project=project,
         uuid=uuid,
         filter=prom.get_filter('cloudstats'))
-    stats_running = prom.query_prometheus_multiple(query_running, timing[0], step=timing[1])
+    stats_running = prom.query_prometheus_multiple(
+        query_running,
+        request.start,
+        end=request.end,
+        step=request.step)
     for line in stats_running:
         x = list(map(lambda x: x.strftime('%Y-%m-%d %H:%M:%S'), line['x']))
         y = line['y']
@@ -312,12 +352,16 @@ def instance_graph_memory(request, project, uuid):
 
 @login_required
 @staff
+@parse_start_end(minimum=prom.rate('libvirtd_exporter'))
 def projects_graph_mem(request):
     data = []
-    timing = query_time(request)
     query_used = 'sum((libvirtd_domain_balloon_current{{ {filter} }} - libvirtd_domain_balloon_usable{{ {filter} }})/1024/1024) by (project_name)'.format(
         filter=prom.get_filter('cloudstats'))
-    stats_used = prom.query_prometheus_multiple(query_used, timing[0], step=timing[1])
+    stats_used = prom.query_prometheus_multiple(
+        query_used,
+        request.start,
+        end=request.end,
+        step=request.step)
     for line in stats_used:
         data.append({
             'x': list(map(lambda x: x.strftime('%Y-%m-%d %H:%M:%S'), line['x'])),
@@ -330,7 +374,11 @@ def projects_graph_mem(request):
 
     query_running = 'sum((libvirtd_domain_balloon_current{{ {filter} }})/1024/1024)'.format(
         filter=prom.get_filter('cloudstats'))
-    stats_running = prom.query_prometheus_multiple(query_running, timing[0], step=timing[1])
+    stats_running = prom.query_prometheus_multiple(
+        query_running,
+        request.start,
+        end=request.end,
+        step=request.step)
     for line in stats_running:
         data.append({
             'x': list(map(lambda x: x.strftime('%Y-%m-%d %H:%M:%S'), line['x'])),
@@ -352,16 +400,20 @@ def projects_graph_mem(request):
 
 @login_required
 @openstackproject_or_staff
+@parse_start_end(minimum=prom.rate('libvirtd_exporter'))
 def project_graph_disk_bandwidth(request, project):
     data = []
-    timing = query_time(request)
 
     for direction in ['read', 'write']:
         query_bandwidth = 'rate(libvirtd_domain_block_{direction}_bytes{{project_name="{project}", {filter}}}[1m])/1024/1024'.format(
             direction=direction,
             project=project,
             filter=prom.get_filter('cloudstats'))
-        stats_bandwidth = prom.query_prometheus_multiple(query_bandwidth, timing[0], step=timing[1])
+        stats_bandwidth = prom.query_prometheus_multiple(
+            query_bandwidth,
+            request.start,
+            end=request.end,
+            step=request.step)
 
         # Only show UUID if required
         instance_names = []
@@ -399,9 +451,9 @@ def project_graph_disk_bandwidth(request, project):
 
 @login_required
 @openstackproject_or_staff
+@parse_start_end(minimum=prom.rate('libvirtd_exporter'))
 def instance_graph_disk_bandwidth(request, project, uuid):
     data = []
-    timing = query_time(request)
 
     for direction in ['read', 'write']:
         query_bandwidth = 'rate(libvirtd_domain_block_{direction}_bytes{{project_name="{project}", uuid="{uuid}", {filter}}}[1m])/1024/1024'.format(
@@ -409,7 +461,11 @@ def instance_graph_disk_bandwidth(request, project, uuid):
             project=project,
             uuid=uuid,
             filter=prom.get_filter('cloudstats'))
-        stats_bandwidth = prom.query_prometheus_multiple(query_bandwidth, timing[0], step=timing[1])
+        stats_bandwidth = prom.query_prometheus_multiple(
+            query_bandwidth,
+            request.start,
+            end=request.end,
+            step=request.step)
 
         for line in stats_bandwidth:
             x = list(map(lambda x: x.strftime('%Y-%m-%d %H:%M:%S'), line['x']))
@@ -438,16 +494,20 @@ def instance_graph_disk_bandwidth(request, project, uuid):
 
 @login_required
 @openstackproject_or_staff
+@parse_start_end(minimum=prom.rate('libvirtd_exporter'))
 def project_graph_disk_iops(request, project):
     data = []
-    timing = query_time(request)
 
     for direction in ['read', 'write']:
         query_bandwidth = 'rate(libvirtd_domain_block_{direction}_requests{{project_name="{project}", {filter}}}[1m])'.format(
             direction=direction,
             project=project,
             filter=prom.get_filter('cloudstats'))
-        stats_bandwidth = prom.query_prometheus_multiple(query_bandwidth, timing[0], step=timing[1])
+        stats_bandwidth = prom.query_prometheus_multiple(
+            query_bandwidth,
+            request.start,
+            end=request.end,
+            step=request.step)
 
         # Only show UUID if required
         instance_names = []
@@ -484,9 +544,9 @@ def project_graph_disk_iops(request, project):
 
 @login_required
 @openstackproject_or_staff
+@parse_start_end(minimum=prom.rate('libvirtd_exporter'))
 def instance_graph_disk_iops(request, project, uuid):
     data = []
-    timing = query_time(request)
 
     for direction in ['read', 'write']:
         query_bandwidth = 'rate(libvirtd_domain_block_{direction}_requests{{project_name="{project}", uuid="{uuid}", {filter}}}[1m])'.format(
@@ -494,7 +554,11 @@ def instance_graph_disk_iops(request, project, uuid):
             project=project,
             uuid=uuid,
             filter=prom.get_filter('cloudstats'))
-        stats_bandwidth = prom.query_prometheus_multiple(query_bandwidth, timing[0], step=timing[1])
+        stats_bandwidth = prom.query_prometheus_multiple(
+            query_bandwidth,
+            request.start,
+            end=request.end,
+            step=request.step)
 
         for line in stats_bandwidth:
             x = list(map(lambda x: x.strftime('%Y-%m-%d %H:%M:%S'), line['x']))
@@ -521,16 +585,20 @@ def instance_graph_disk_iops(request, project, uuid):
 
 @login_required
 @openstackproject_or_staff
+@parse_start_end(minimum=prom.rate('libvirtd_exporter'))
 def project_graph_network_bandwidth(request, project):
     data = []
-    timing = query_time(request)
 
     for direction in ['rx', 'tx']:
         query_bandwidth = 'rate(libvirtd_domain_net_{direction}_bytes{{project_name="{project}", {filter}}}[1m])/1024/1024'.format(
             direction=direction,
             project=project,
             filter=prom.get_filter('cloudstats'))
-        stats_bandwidth = prom.query_prometheus_multiple(query_bandwidth, timing[0], step=timing[1])
+        stats_bandwidth = prom.query_prometheus_multiple(
+            query_bandwidth,
+            request.start,
+            end=request.end,
+            step=request.step)
 
         # Only show UUID if required
         instance_names = []
@@ -568,9 +636,9 @@ def project_graph_network_bandwidth(request, project):
 
 @login_required
 @openstackproject_or_staff
+@parse_start_end(minimum=prom.rate('libvirtd_exporter'))
 def instance_graph_network_bandwidth(request, project, uuid):
     data = []
-    timing = query_time(request)
 
     for direction in ['rx', 'tx']:
         query_bandwidth = 'rate(libvirtd_domain_net_{direction}_bytes{{project_name="{project}", uuid="{uuid}", {filter}}}[1m])/1024/1024'.format(
@@ -578,7 +646,11 @@ def instance_graph_network_bandwidth(request, project, uuid):
             project=project,
             uuid=uuid,
             filter=prom.get_filter('cloudstats'))
-        stats_bandwidth = prom.query_prometheus_multiple(query_bandwidth, timing[0], step=timing[1])
+        stats_bandwidth = prom.query_prometheus_multiple(
+            query_bandwidth,
+            request.start,
+            end=request.end,
+            step=request.step)
 
         for line in stats_bandwidth:
             x = list(map(lambda x: x.strftime('%Y-%m-%d %H:%M:%S'), line['x']))
diff --git a/jobstats/templates/jobstats/job.html b/jobstats/templates/jobstats/job.html
index d0458de..9b0bb46 100644
--- a/jobstats/templates/jobstats/job.html
+++ b/jobstats/templates/jobstats/job.html
@@ -794,45 +794,45 @@

 {% translate "Power used" %}
 
 {% include "notes.js" %}
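
Reviewer note: the common thread in this commit is that every view trades its ad-hoc time handling (hard-coded windows in accountstats, query_time(request) in cloudstats) for @parse_start_end(..., minimum=prom.rate('<exporter>')), after which the view reads request.start, request.end, and request.step directly. The decorator itself is not part of this diff; the sketch below is a hypothetical reconstruction of the contract the views now rely on, not the actual code in userportal/common.py. The GET parameter names, the ~250-point target, and the fallback window are all assumptions.

# Hypothetical sketch of parse_start_end; the real implementation lives in
# userportal/common.py and may differ. Assumed: "start"/"end" arrive as unix
# timestamps in the query string, and `minimum` is the exporter's scrape
# interval in seconds (what prom.rate(...) is presumed to return).
from datetime import datetime, timedelta
from functools import wraps


def parse_start_end(default_start=None, minimum=30):
    def decorator(view):
        @wraps(view)
        def wrapper(request, *args, **kwargs):
            start = default_start or (datetime.now() - timedelta(hours=6))
            end = datetime.now()
            try:
                start = datetime.fromtimestamp(int(request.GET['start']))
                end = datetime.fromtimestamp(int(request.GET['end']))
            except (KeyError, ValueError):
                pass  # keep the defaults when the range is absent or malformed
            request.start = start
            request.end = end
            # Aim for roughly 250 points per graph, but never step below the
            # exporter's scrape interval: a finer step would just resample the
            # same datapoints (and rate() windows could cover < 2 samples).
            span = (end - start).total_seconds()
            request.step = max(int(span / 250), minimum)
            return view(request, *args, **kwargs)
        return wrapper
    return decorator

Under these assumptions, the minimum= floor added throughout the diff keeps a zoomed-in chart from requesting a resolution finer than the exporter actually provides, which would matter most for the rate(...[1m]) and [5m] queries above on short time ranges.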