Skip to content

Commit

Permalink
The parameters of a decorator are evaluated when python starts (#68)
Browse files Browse the repository at this point in the history
This means datetime.now() will not update in a long-lived Django Python process. A timedelta must be used instead; the delta is calculated within the function rather than in the decorator's parameters.
  • Loading branch information
guilbaults authored Nov 12, 2024
1 parent 099a493 commit e2d85d5
Show file tree
Hide file tree
Showing 5 changed files with 45 additions and 43 deletions.
34 changes: 17 additions & 17 deletions accountstats/views.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
from django.shortcuts import render
from django.http import JsonResponse
from django.conf import settings
from datetime import datetime, timedelta
from datetime import timedelta
from django.contrib.auth.decorators import login_required
from django.utils.translation import gettext as _
from userportal.common import account_or_staff, Prometheus, parse_start_end
Expand Down Expand Up @@ -46,7 +46,7 @@ def account(request, account):

@login_required
@account_or_staff
@parse_start_end(default_start=datetime.now() - timedelta(days=30), minimum=prom.rate('slurm-job-exporter'))
@parse_start_end(timedelta_start=timedelta(days=30), minimum=prom.rate('slurm-job-exporter'))
def graph_application(request, account):
data = []
query_alloc = 'slurm_job:process_usage:sum_account{{account="{}", {}}}'.format(account, prom.get_filter())
Expand Down Expand Up @@ -98,63 +98,63 @@ def graph(request, query, stacked=True, unit=None):

@login_required
@account_or_staff
@parse_start_end(default_start=datetime.now() - timedelta(days=30), minimum=prom.rate('slurm-job-exporter'))
@parse_start_end(timedelta_start=timedelta(days=30), minimum=prom.rate('slurm-job-exporter'))
def graph_cpu_allocated(request, account):
query_alloc = 'sum(slurm_job:allocated_core:count_user_account{{account="{}", {}}}) by (user)'.format(account, prom.get_filter())
return graph(request, query_alloc, unit=_('cores'))


@login_required
@account_or_staff
@parse_start_end(default_start=datetime.now() - timedelta(days=30), minimum=prom.rate('slurm-job-exporter'))
@parse_start_end(timedelta_start=timedelta(days=30), minimum=prom.rate('slurm-job-exporter'))
def graph_cpu_used(request, account):
query_used = 'sum(slurm_job:used_core:sum_user_account{{account="{}", {}}}) by (user)'.format(account, prom.get_filter())
return graph(request, query_used, unit=_('cores'))


@login_required
@account_or_staff
@parse_start_end(default_start=datetime.now() - timedelta(days=30), minimum=prom.rate('slurm-job-exporter'))
@parse_start_end(timedelta_start=timedelta(days=30), minimum=prom.rate('slurm-job-exporter'))
def graph_cpu_wasted(request, account):
query_alloc = 'clamp_min(sum(slurm_job:allocated_core:count_user_account{{account="{}", {}}}) by (user) - sum(slurm_job:used_core:sum_user_account{{account="{}", {}}}) by (user), 0)'.format(account, prom.get_filter(), account, prom.get_filter())
return graph(request, query_alloc, stacked=False, unit=_('cores'))


@login_required
@account_or_staff
@parse_start_end(default_start=datetime.now() - timedelta(days=30), minimum=prom.rate('slurm-job-exporter'))
@parse_start_end(timedelta_start=timedelta(days=30), minimum=prom.rate('slurm-job-exporter'))
def graph_mem_allocated(request, account):
query_alloc = 'sum(slurm_job:allocated_memory:sum_user_account{{account="{}", {}}}) by (user) /(1024*1024*1024)'.format(account, prom.get_filter())
return graph(request, query_alloc, unit=_('GiB'))


@login_required
@account_or_staff
@parse_start_end(default_start=datetime.now() - timedelta(days=30), minimum=prom.rate('slurm-job-exporter'))
@parse_start_end(timedelta_start=timedelta(days=30), minimum=prom.rate('slurm-job-exporter'))
def graph_mem_used(request, account):
query_used = 'sum(slurm_job:rss_memory:sum_user_account{{account="{}", {}}}) by (user) /(1024*1024*1024)'.format(account, prom.get_filter())
return graph(request, query_used, unit=_('GiB'))


@login_required
@account_or_staff
@parse_start_end(default_start=datetime.now() - timedelta(days=30), minimum=prom.rate('slurm-job-exporter'))
@parse_start_end(timedelta_start=timedelta(days=30), minimum=prom.rate('slurm-job-exporter'))
def graph_mem_wasted(request, account):
query_alloc = 'clamp_min(sum(slurm_job:allocated_memory:sum_user_account{{account="{}", {}}}) by (user) - sum(slurm_job:rss_memory:sum_user_account{{account="{}", {}}}) by (user), 0) /(1024*1024*1024)'.format(account, prom.get_filter(), account, prom.get_filter())
return graph(request, query_alloc, stacked=False, unit=_('GiB'))


@login_required
@account_or_staff
@parse_start_end(default_start=datetime.now() - timedelta(hours=6), minimum=prom.rate('lustre_exporter'))
@parse_start_end(timedelta_start=timedelta(hours=6), minimum=prom.rate('lustre_exporter'))
def graph_lustre_mdt(request, account):
query = 'sum(rate(lustre_job_stats_total{{component=~"mdt",account=~"{}", {}}}[5m])) by (user, fs) !=0'.format(account, prom.get_filter())
return graph(request, query, stacked=False, unit=_('IOPS'))


@login_required
@account_or_staff
@parse_start_end(default_start=datetime.now() - timedelta(hours=6), minimum=prom.rate('lustre_exporter'))
@parse_start_end(timedelta_start=timedelta(hours=6), minimum=prom.rate('lustre_exporter'))
def graph_lustre_ost(request, account):
data = []
for i in ['read', 'write']:
Expand Down Expand Up @@ -192,31 +192,31 @@ def graph_lustre_ost(request, account):

@login_required
@account_or_staff
@parse_start_end(default_start=datetime.now() - timedelta(days=30), minimum=prom.rate('slurm-job-exporter'))
@parse_start_end(timedelta_start=timedelta(days=30), minimum=prom.rate('slurm-job-exporter'))
def graph_gpu_allocated(request, account):
query = 'sum(slurm_job:allocated_gpu:count_user_account{{account="{}", {}}}) by (user)'.format(account, prom.get_filter())
return graph(request, query, unit=_('GPUs'))


@login_required
@account_or_staff
@parse_start_end(default_start=datetime.now() - timedelta(days=30), minimum=prom.rate('slurm-job-exporter'))
@parse_start_end(timedelta_start=timedelta(days=30), minimum=prom.rate('slurm-job-exporter'))
def graph_gpu_used(request, account):
query = 'sum(slurm_job:used_gpu:sum_user_account{{account="{}", {}}}) by (user)'.format(account, prom.get_filter())
return graph(request, query, unit=_('GPUs'))


@login_required
@account_or_staff
@parse_start_end(default_start=datetime.now() - timedelta(days=30), minimum=prom.rate('slurm-job-exporter'))
@parse_start_end(timedelta_start=timedelta(days=30), minimum=prom.rate('slurm-job-exporter'))
def graph_gpu_wasted(request, account):
query = 'sum(slurm_job:allocated_gpu:count_user_account{{account="{}", {}}}) by (user) - sum(slurm_job:used_gpu:sum_user_account{{account="{}", {}}}) by (user)'.format(account, prom.get_filter(), account, prom.get_filter())
return graph(request, query, stacked=False, unit=_('GPUs'))


@login_required
@account_or_staff
@parse_start_end(default_start=datetime.now() - timedelta(days=30), minimum=prom.rate('slurm-job-exporter'))
@parse_start_end(timedelta_start=timedelta(days=30), minimum=prom.rate('slurm-job-exporter'))
# kinda broken when using multiple GPUs
def graph_gpu_power_allocated(request, account):
query = 'count(slurm_job_power_gpu{{account="{}", {}}}) by (user) * 300'.format(account, prom.get_filter())
Expand All @@ -225,7 +225,7 @@ def graph_gpu_power_allocated(request, account):

@login_required
@account_or_staff
@parse_start_end(default_start=datetime.now() - timedelta(days=30), minimum=prom.rate('slurm-job-exporter'))
@parse_start_end(timedelta_start=timedelta(days=30), minimum=prom.rate('slurm-job-exporter'))
# kinda broken when using multiple GPUs
def graph_gpu_power_used(request, account):
query = 'sum(slurm_job_power_gpu{{account="{}", {}}}) by (user) / 1000'.format(account, prom.get_filter())
Expand All @@ -234,7 +234,7 @@ def graph_gpu_power_used(request, account):

@login_required
@account_or_staff
@parse_start_end(default_start=datetime.now() - timedelta(days=30), minimum=prom.rate('slurm-job-exporter'))
@parse_start_end(timedelta_start=timedelta(days=30), minimum=prom.rate('slurm-job-exporter'))
# kinda broken when using multiple GPUs
def graph_gpu_power_wasted(request, account):
query = '(count(slurm_job_power_gpu{{account="{}", {}}}) by (user) * 300) - (sum(slurm_job_power_gpu{{account="{}", {}}}) by (user) / 1000)'.format(account, prom.get_filter(), account, prom.get_filter())
Expand All @@ -254,7 +254,7 @@ def graph_gpu_priority(request, account):


# auth done in functions above
@parse_start_end(default_start=datetime.now() - timedelta(days=90), minimum=prom.rate('slurm-job-exporter'))
@parse_start_end(timedelta_start=timedelta(days=90), minimum=prom.rate('slurm-job-exporter'))
def graph_cpu_or_gpu_priority(request, account, gpu_or_cpu):
data = []
if gpu_or_cpu == 'gpu':
Expand Down
12 changes: 6 additions & 6 deletions jobstats/views.py
Original file line number Diff line number Diff line change
Expand Up @@ -603,7 +603,7 @@ def graph_cpu(request, username, job_id):

@login_required
@user_or_staff
@parse_start_end(default_start=datetime.now() - timedelta(days=7))
@parse_start_end(timedelta_start=timedelta(days=7))
def graph_cpu_user(request, username):
data = []
try:
Expand Down Expand Up @@ -640,7 +640,7 @@ def graph_cpu_user(request, username):

@login_required
@user_or_staff
@parse_start_end(default_start=datetime.now() - timedelta(days=7))
@parse_start_end(timedelta_start=timedelta(days=7))
def graph_mem_user(request, username):
data = []
try:
Expand Down Expand Up @@ -903,7 +903,7 @@ def graph_lustre_mdt(request, username, job_id):

@login_required
@user_or_staff
@parse_start_end(default_start=datetime.now() - timedelta(hours=6))
@parse_start_end(timedelta_start=timedelta(hours=6))
def graph_lustre_mdt_user(request, username):
query = 'sum(rate(lustre_job_stats_total{{component=~"mdt",user=~"{}", {}}}[{}s])) by (operation, fs) !=0'.format(username, prom.get_filter(), prom.rate('lustre_exporter'))
stats = prom.query_prometheus_multiple(query, request.start, request.end, step=request.step)
Expand Down Expand Up @@ -979,7 +979,7 @@ def graph_lustre_ost(request, username, job_id):

@login_required
@user_or_staff
@parse_start_end(default_start=datetime.now() - timedelta(hours=6))
@parse_start_end(timedelta_start=timedelta(hours=6))
def graph_lustre_ost_user(request, username):
data = []
for i in ['read', 'write']:
Expand Down Expand Up @@ -1067,7 +1067,7 @@ def graph_gpu_utilization(request, username, job_id):

@login_required
@user_or_staff
@parse_start_end(default_start=datetime.now() - timedelta(days=7))
@parse_start_end(timedelta_start=timedelta(days=7))
def graph_gpu_utilization_user(request, username):
data = []

Expand Down Expand Up @@ -1231,7 +1231,7 @@ def graph_gpu_power(request, username, job_id):

@login_required
@user_or_staff
@parse_start_end(default_start=datetime.now() - timedelta(days=2))
@parse_start_end(timedelta_start=timedelta(days=2))
def graph_gpu_power_user(request, username):
data = []

Expand Down
24 changes: 12 additions & 12 deletions nodes/views.py
Original file line number Diff line number Diff line change
Expand Up @@ -302,7 +302,7 @@ def node_state(node):

@login_required
@staff
@parse_start_end(default_start=datetime.now() - timedelta(days=7))
@parse_start_end(timedelta_start=timedelta(days=7))
def graph_disk_used(request, node):
query_disk = '(node_filesystem_size_bytes{{{hostname_label}=~"{node}(:.*)", {filter}}} - node_filesystem_avail_bytes{{{hostname_label}=~"{node}(:.*)", {filter}}})/(1000*1000*1000)'.format(
hostname_label=settings.PROM_NODE_HOSTNAME_LABEL,
Expand Down Expand Up @@ -332,7 +332,7 @@ def graph_disk_used(request, node):

@login_required
@staff
@parse_start_end(default_start=datetime.now() - timedelta(days=7))
@parse_start_end(timedelta_start=timedelta(days=7))
def graph_cpu_jobstats(request, node):
query = 'sum(rate(slurm_job_core_usage_total{{{hostname_label}=~"{node}(:.*)", {filter}}}[{step}s]) / 1000000000) by (user, slurmjobid)'.format(
hostname_label=settings.PROM_NODE_HOSTNAME_LABEL,
Expand Down Expand Up @@ -363,7 +363,7 @@ def graph_cpu_jobstats(request, node):

@login_required
@staff
@parse_start_end(default_start=datetime.now() - timedelta(days=7))
@parse_start_end(timedelta_start=timedelta(days=7))
def graph_cpu_node(request, node):
query = 'sum by (mode)(irate(node_cpu_seconds_total{{mode!="idle",{hostname_label}=~"{node}(:.*)",{filter}}}[{step}s]))'.format(
hostname_label=settings.PROM_NODE_HOSTNAME_LABEL,
Expand Down Expand Up @@ -395,7 +395,7 @@ def graph_cpu_node(request, node):

@login_required
@staff
@parse_start_end(default_start=datetime.now() - timedelta(days=7))
@parse_start_end(timedelta_start=timedelta(days=7))
def graph_memory_jobstats(request, node):
query = '(sum(slurm_job_memory_usage{{{hostname_label}=~"{node}(:.*)", {filter}}}) by (user, slurmjobid))/(1024*1024*1024)'.format(
hostname_label=settings.PROM_NODE_HOSTNAME_LABEL,
Expand Down Expand Up @@ -426,7 +426,7 @@ def graph_memory_jobstats(request, node):

@login_required
@staff
@parse_start_end(default_start=datetime.now() - timedelta(days=7))
@parse_start_end(timedelta_start=timedelta(days=7))
def graph_memory_node(request, node):
data = []
query_apps = '(node_memory_MemTotal_bytes{{{hostname_label}=~"{node}(:.*)",{filter}}} - \
Expand Down Expand Up @@ -480,7 +480,7 @@ def graph_memory_node(request, node):

@login_required
@staff
@parse_start_end(default_start=datetime.now() - timedelta(days=7))
@parse_start_end(timedelta_start=timedelta(days=7))
def graph_ethernet_bdw(request, node):
data = []

Expand Down Expand Up @@ -517,7 +517,7 @@ def graph_ethernet_bdw(request, node):

@login_required
@staff
@parse_start_end(default_start=datetime.now() - timedelta(days=7))
@parse_start_end(timedelta_start=timedelta(days=7))
def graph_infiniband_bdw(request, node):
data = []
for direction in ['received', 'transmitted']:
Expand Down Expand Up @@ -553,7 +553,7 @@ def graph_infiniband_bdw(request, node):

@login_required
@staff
@parse_start_end(default_start=datetime.now() - timedelta(days=7))
@parse_start_end(timedelta_start=timedelta(days=7))
def graph_disk_iops(request, node):
data = []
for direction in ['reads', 'writes']:
Expand Down Expand Up @@ -589,7 +589,7 @@ def graph_disk_iops(request, node):

@login_required
@staff
@parse_start_end(default_start=datetime.now() - timedelta(days=7))
@parse_start_end(timedelta_start=timedelta(days=7))
def graph_disk_bdw(request, node):
data = []
for direction in ['read', 'written']:
Expand Down Expand Up @@ -625,7 +625,7 @@ def graph_disk_bdw(request, node):

@login_required
@staff
@parse_start_end(default_start=datetime.now() - timedelta(days=7))
@parse_start_end(timedelta_start=timedelta(days=7))
def graph_gpu_utilization(request, node):
data = []
queries = [
Expand Down Expand Up @@ -670,7 +670,7 @@ def graph_gpu_utilization(request, node):

@login_required
@staff
@parse_start_end(default_start=datetime.now() - timedelta(days=7))
@parse_start_end(timedelta_start=timedelta(days=7))
def graph_gpu_memory(request, node):
query = 'slurm_job_memory_usage_gpu{{{hostname_label}=~"{node}(:.*)", {filter}}} /(1024*1024*1024)'.format(
hostname_label=settings.PROM_NODE_HOSTNAME_LABEL,
Expand Down Expand Up @@ -703,7 +703,7 @@ def graph_gpu_memory(request, node):

@login_required
@staff
@parse_start_end(default_start=datetime.now() - timedelta(days=7))
@parse_start_end(timedelta_start=timedelta(days=7))
def graph_gpu_power(request, node):
query = 'slurm_job_power_gpu{{{hostname_label}=~"{node}(:.*)", {filter}}}/1000'.format(
hostname_label=settings.PROM_NODE_HOSTNAME_LABEL,
Expand Down
12 changes: 7 additions & 5 deletions userportal/common.py
Original file line number Diff line number Diff line change
Expand Up @@ -170,9 +170,11 @@ def get_step(start, end, minimum=60):
return span


def parse_start_end(default_start=datetime.now() - timedelta(days=1), default_end=datetime.now(), minimum=60):
def parse_start_end(timedelta_start=timedelta(days=1), minimum=60):
""" From the GET parameters, add start and end to the request object
if delta is set, it will be used to calculate the start time from now() instead of start and end
The default parameters are evaluated when Python loads the module; this is why they are not datetime objects, since those would not update when the decorated view is called
"""
def decorator_wrapper(view_func):
def func_wrapper(request, *args, **kwargs):
Expand All @@ -185,17 +187,17 @@ def func_wrapper(request, *args, **kwargs):
try:
start = datetime.fromtimestamp(int(request.GET['start']))
except ValueError:
start = default_start
start = datetime.now() - timedelta_start
else:
start = default_start
start = datetime.now() - timedelta_start

if 'end' in request.GET:
try:
end = datetime.fromtimestamp(int(request.GET['end']))
except ValueError:
end = default_end
end = datetime.now()
else:
end = default_end
end = datetime.now()

# start and end can't be in the future
if start > datetime.now():
Expand Down
6 changes: 3 additions & 3 deletions usersummary/views.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@
from slurm.models import JobTable
from django.conf import settings
from django.http import JsonResponse, HttpResponseForbidden
from datetime import datetime, timedelta
from datetime import timedelta

prom = Prometheus(settings.PROMETHEUS)

Expand Down Expand Up @@ -79,7 +79,7 @@ def user(request, username):

@login_required
@user_or_staff
@parse_start_end(default_start=datetime.now() - timedelta(days=90))
@parse_start_end(timedelta_start=timedelta(days=90))
def graph_inodes(request, username, resource_type, resource_name):
allocs = storage_allocations(username)
for alloc in allocs:
Expand Down Expand Up @@ -127,7 +127,7 @@ def graph_inodes(request, username, resource_type, resource_name):

@login_required
@user_or_staff
@parse_start_end(default_start=datetime.now() - timedelta(days=90))
@parse_start_end(timedelta_start=timedelta(days=90))
def graph_bytes(request, username, resource_type, resource_name):
allocs = storage_allocations(username)
for alloc in allocs:
Expand Down

0 comments on commit e2d85d5

Please sign in to comment.