From 3ac6425628019d3daf106cd81b97b05396d946f9 Mon Sep 17 00:00:00 2001 From: simplymathematics <15224968+simplymathematics@users.noreply.github.com> Date: Thu, 4 Apr 2024 03:44:24 +0200 Subject: [PATCH] Update pytorch plots (#179) * fix bug in scoring failure * update power example to support bit-depth search * update result directories * revert changes to example/power * add bit depth example * revert directory changes for power example * fixed directory for mnist in power example * fixed directory for mnist in power example * use the latest torch torchvision torchaudio * update workflow to push on PR * removed double stage folders in log folder * + more epochs for cifar100 * changed intervals from uniform to log uniform, made learning rate range larger * strip whitespace, covert o numeric in compile script * update git ignores * suport nb_epoch as defence choice * remove adv_success from requirements * add "NaN" to nones * update afr script * update mnist .dvc cache * updat cifar10 plots * uncomment paretoset in plotting * fix default defence bug and realtive pathing in compile script * moved plots to subfolder * better configuration support * fix compile script bug * update compile and plots yaml for power example * fix compile bug * update plots * include plot files in dvc * update afr to read from conf file * linting * linting * update pytorch example * update pytorch afr.yaml (not working) * split cleaning from plotting, but only working for examples/pytorch/mnist * working cleaning script * fix pytorch examples with new clean script * remove debug check from parse_results * make deckard a depedendency of the parsing script * made models.sh easier to read * update afr for pytorch example * update power example * update dvc.lock for pytorch example * update pytorch/cifar100 * update power/plots (not working) * add docstrings to plots.py * update power example with merge script * add power data * update configs * add combined plots * update afr models * added support for dummy variables in afr * ++combined_plots.py and fix afr bug * add cifar100 l4 power data with commenting everything else * add varepsilon to attack params * add dummy variables * fix rounding bug * update to newest plots * newest plots for power example * linting * removed old afr file * linting * Merge branch 'fix-compile-script' of github.com:simplymathematics/deckard into fix-compile-script * update conf * fixed kepler script bug * linting * linting * linting * linting * linting * linting * linting * linting * linting * add dvc.yaml * add query_kepler * update layers * update confs * fix post-merge scripts * linting * linting * update setup scripts * update gitignore * update gitignore * model.py layer * update gitignore * update gitignore * update gitignore * update gitignore * update gitignore * update gitignore * refactor fit method out of plot method, add support for dummy variables * linting * linting * add optional summary when fitting * small refactor * add xlim, ylim support * removed cox * moved plots to root directory of plots folder * add plots for power example * linting * linting * linting * add csvs * removed test data * remove data and plots from git * add plot summary plot to afr models * add plot summary plot to afr models * fix formatting bugs * fix pred time bug * update afr plot labels * update combined conf * update configuration * linting * update confs * remove time normalization * fix timing bug * fix timing bug * tweak font formatting * tweak result formatting * add partial effect of 
accuracy plot * update plot confs * refactor plot summary * update plotting in AFR model, add penalizer to all models * re-ran afr plotting * fix sample size bug * add support for multivariate QQ plot * linting * linting * linting * bug fix * re-run * linting * linting * linting * linting * linting * re-ran for IEEE Cloud --------- Co-authored-by: Mohammad Reza Saleh Co-authored-by: salehsedghpour --- deckard/base/model/model.py | 6 +- deckard/layers/afr.py | 356 +++++++++++++++++-------- examples/power/conf/combined_afr.yaml | 51 +++- examples/power/plots/combined_plots.py | 150 ++++++----- examples/power/plots/dvc.lock | 290 ++++++++++---------- 5 files changed, 525 insertions(+), 328 deletions(-) diff --git a/deckard/base/model/model.py b/deckard/base/model/model.py index 13d810f5..bb435143 100644 --- a/deckard/base/model/model.py +++ b/deckard/base/model/model.py @@ -565,7 +565,7 @@ def predict(self, data=None, model=None, predictions_file=None): predictions, { "predict_time": (end - start) / 1e9, - "predict_time_per_sample": (end - start) / (len(data[0]) * 1e9), + "predict_time_per_sample": (end - start) / (len(data[1]) * 1e9), "predict_start_time": start_timestamp, "predict_end_time": end_timestamp, "predict_device": device, @@ -619,7 +619,7 @@ def predict_proba(self, data=None, model=None, probabilities_file=None): predictions, { "predict_proba_time": (end - start) / 1e9, - "predict_proba_time_per_sample": (end - start) / (len(data[0]) * 1e9), + "predict_proba_time_per_sample": (end - start) / (len(data[1]) * 1e9), "predict_proba_start_time": start_timestamp, "predict_proba_end_time": end_timestamp, "predict_proba_device": device, @@ -683,7 +683,7 @@ def predict_log_loss(self, data, model, losses_file=None): { "predict_log_proba_time": (end - start) / 1e9, "predict_log_proba_time_per_sample": (end - start) - / (len(data[0]) * 1e9), + / (len(data[1]) * 1e9), "predict_log_proba_start_time": start_timestamp, "predict_log_proba_end_time": end_timestamp, "predict_log_device": device, diff --git a/deckard/layers/afr.py b/deckard/layers/afr.py index b2aef14c..13787632 100644 --- a/deckard/layers/afr.py +++ b/deckard/layers/afr.py @@ -1,9 +1,13 @@ +# -*- coding: utf-8 -*- +import warnings +from pathlib import Path import pandas as pd import numpy as np -from pathlib import Path - +import matplotlib +import logging +import yaml +import argparse import matplotlib.pyplot as plt - import seaborn as sns from sklearn.model_selection import train_test_split from lifelines import ( @@ -11,21 +15,134 @@ LogNormalAFTFitter, LogLogisticAFTFitter, CoxPHFitter, - WeibullFitter, - LogNormalFitter, - LogLogisticFitter, - plotting, + CRCSplineFitter, ) - +from lifelines.utils import CensoringType +from lifelines.fitters import RegressionFitter from .clean_data import drop_frames_without_results -import matplotlib -import logging -import yaml -import argparse logger = logging.getLogger(__name__) +# Modified from https://github.com/CamDavidsonPilon/lifelines/blob/master/lifelines/calibration.py +def survival_probability_calibration( + model: RegressionFitter, + df: pd.DataFrame, + t0: float, + ax=None, +): + r""" + Smoothed calibration curves for time-to-event models. This is analogous to + calibration curves for classification models, extended to handle survival probabilities + and censoring. Produces a matplotlib figure and some metrics. + + We want to calibrate our model's prediction of :math:`P(T < \text{t0})` against the observed frequencies. 
+ + Parameters + ------------- + + model: + a fitted lifelines regression model to be evaluated + df: DataFrame + a DataFrame - if equal to the training data, then this is an in-sample calibration. Could also be an out-of-sample + dataset. + t0: float + the time to evaluate the probability of event occurring prior at. + + Returns + ---------- + ax: + mpl axes + ICI: + mean absolute difference between predicted and observed + E50: + median absolute difference between predicted and observed + + https://onlinelibrary.wiley.com/doi/full/10.1002/sim.8570 + + """ + + def ccl(p): + return np.log(-np.log(1 - p)) + + if ax is None: + ax = plt.gca() + + T = model.duration_col + E = model.event_col + + predictions_at_t0 = np.clip( + 1 - model.predict_survival_function(df, times=[t0]).T.squeeze(), + 1e-10, + 1 - 1e-10, + ) + + # create new dataset with the predictions + prediction_df = pd.DataFrame( + {"ccl_at_%d" % t0: ccl(predictions_at_t0), T: df[T], E: df[E]}, + ) + + # fit new dataset to flexible spline model + # this new model connects prediction probabilities and actual survival. It should be very flexible, almost to the point of overfitting. It's goal is just to smooth out the data! + n_knots = 3 + regressors = { + "beta_": ["ccl_at_%d" % t0], + "gamma0_": "1", + "gamma1_": "1", + "gamma2_": "1", + } + + # this model is from examples/royson_crowther_clements_splines.py + crc = CRCSplineFitter(n_baseline_knots=n_knots, penalizer=0.000001) + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + if CensoringType.is_right_censoring(model): + crc.fit_right_censoring(prediction_df, T, E, regressors=regressors) + elif CensoringType.is_left_censoring(model): + crc.fit_left_censoring(prediction_df, T, E, regressors=regressors) + elif CensoringType.is_interval_censoring(model): + crc.fit_interval_censoring(prediction_df, T, E, regressors=regressors) + + # predict new model at values 0 to 1, but remember to ccl it! 
+ x = np.linspace( + np.clip(predictions_at_t0.min() - 0.01, 0, 1), + np.clip(predictions_at_t0.max() + 0.01, 0, 1), + 100, + ) + y = ( + 1 + - crc.predict_survival_function( + pd.DataFrame({"ccl_at_%d" % t0: ccl(x)}), + times=[t0], + ).T.squeeze() + ) + + # plot our results + + color = "tab:red" + ax.plot(x, y, label="Calibration Curve", color=color) + ax.set_xlabel("Predicted probability of \nt ≤ %d mortality" % t0) + ax.set_ylabel("Observed probability of \nt ≤ %d mortality" % t0, color=color) + ax.tick_params(axis="y", labelcolor=color) + + # plot x=y line + ax.plot(x, x, c="k", ls="--") + ax.legend() + + plt.tight_layout() + + deltas = ( + (1 - crc.predict_survival_function(prediction_df, times=[t0])).T.squeeze() + - predictions_at_t0 + ).abs() + ICI = deltas.mean() + E50 = np.percentile(deltas, 50) + # print("ICI = ", ICI) + # print("E50 = ", E50) + + return ax, ICI, E50 + + def fit_aft( df, event_col, @@ -33,17 +150,18 @@ def fit_aft( mtype, summary_file=None, summary_plot=None, + summary_title=None, folder=None, replacement_dict={}, **kwargs, ): if mtype == "weibull": - aft = WeibullAFTFitter(**kwargs) + aft = WeibullAFTFitter(**kwargs, penalizer=0.1) elif mtype == "log_normal": - aft = LogNormalAFTFitter(**kwargs) + aft = LogNormalAFTFitter(**kwargs, penalizer=0.1) elif mtype == "log_logistic": - aft = LogLogisticAFTFitter(**kwargs) + aft = LogLogisticAFTFitter(**kwargs, penalizer=0.1) elif mtype == "cox": aft = CoxPHFitter(**kwargs) assert ( @@ -80,12 +198,13 @@ def fit_aft( summary.to_csv(summary_file) logger.info(f"Saved summary to {summary_file}") if summary_plot is not None: + if summary_title is None: + summary_title = ( + f"{mtype} AFR Summary".replace("_", " ").replace("-", "").title() + ) plot_summary( aft=aft, - title=kwargs.get( - "title", - f"{mtype} AFT Summary".replace("_", " ").replace("-", "").title(), - ), + title=summary_title, file=summary_plot, xlabel=kwargs.get("xlabel", "Covariate"), ylabel=kwargs.get("ylabel", "p-value"), @@ -117,14 +236,16 @@ def plot_aft( # Only plot the covariates, skipping the intercept and dummy variables # Dummy variables can be examined using the plot_partial_effects function try: - columns = aft.summary.index.get_level_values(1) + columns = list(aft.summary.index.get_level_values(1)) except IndexError: - columns = aft.summary.index + columns = list(aft.summary.index) + clean_cols = [] for col in columns: - if col.startswith("Intercept:"): - columns = columns.drop(col) - if col.startswith("dummy_"): - columns = columns.drop(col) + if col.startswith("dummy_") or col.startswith("Intercept"): + continue + else: + clean_cols.append(col) + columns = clean_cols ax = aft.plot(columns=columns) labels = ax.get_yticklabels() labels = [label.get_text() for label in labels] @@ -161,20 +282,26 @@ def plot_summary( plt.gcf().clear() summary = aft.summary.copy() summary = pd.DataFrame(summary) - try: - cov = summary.index.get_level_values(1) - # print(list(set(cov))) - # input("List of covariates. 
Press Enter to continue...") - par = summary.index.get_level_values(0) - covariates = [f"{c}: {p}" for p, c in zip(par, cov)] - except IndexError: - covariates = summary.index - ax = sns.barplot(data=summary, x=covariates, y="p") + if isinstance(summary.index, pd.MultiIndex): + covariates = list(summary.index.get_level_values(1)) + summary["covariate"] = covariates + params = list(summary.index.get_level_values(0)) + fullnames = [f"{cov}: {param}" for cov, param in zip(covariates, params)] + else: + covariates = list(summary.index) + summary["covariate"] = covariates + fullnames = covariates + summary["covariate"] = covariates + summary["fullnames"] = fullnames + summary = summary[summary["covariate"] != "Intercept"] + summary = summary[ + summary["covariate"].str.startswith("dummy_") != True # noqa E712 + ] + ax = sns.barplot(data=summary, x="covariate", y="p") ax.set_xlabel(xlabel) ax.set_ylabel(ylabel) ax.set_title(title) - labels = ax.get_xticklabels() - labels = [label.get_text() for label in labels] + labels = fullnames for k, v in replacement_dict.items(): labels = [label.replace(k, v) for label in labels] ax.set_xticklabels(labels, rotation=90) @@ -186,15 +313,15 @@ def plot_summary( def plot_qq( + X_test, aft, title, file, xlabel=None, ylabel=None, - replacement_dict={}, folder=None, - filetype=".pdf", ax=None, + filetype=".pdf", ): suffix = Path(file).suffix if suffix == "": @@ -204,19 +331,16 @@ def plot_qq( if folder is not None: file = Path(folder, file) plt.gcf().clear() - ax = plotting.qq_plot(aft, ax=ax) - labels = ax.get_yticklabels() - labels = [label.get_text() for label in labels] - for k, v in replacement_dict.items(): - labels = [label.replace(k, v) for label in labels] - ax.set_yticklabels(labels) + ax, ici, e50 = survival_probability_calibration(aft, X_test, t0=0.35, ax=ax) ax.set_xlabel(xlabel) ax.set_ylabel(ylabel) ax.set_title(title) + ax.legend().remove() ax.get_figure().tight_layout() ax.get_figure().savefig(file) logger.info(f"Saved graph to {file}") - return ax + plt.gcf().clear() + return ax, ici, e50 def plot_partial_effects( @@ -261,8 +385,14 @@ def plot_partial_effects( def score_model(aft, train, test): train_score = aft.score(train, scoring_method="concordance_index") test_score = aft.score(test, scoring_method="concordance_index") - scores = {"train_score": train_score, "test_score": test_score} - plt.show() + train_ll = aft.score(train, scoring_method="log_likelihood") + test_ll = aft.score(test, scoring_method="log_likelihood") + scores = { + "train_score": train_score, + "test_score": test_score, + "train_ll": train_ll, + "test_ll": test_ll, + } return scores @@ -271,42 +401,59 @@ def make_afr_table( dataset, X_train, X_test, + icis, + e50s, folder=".", + span_columns=True, ): folder = Path(folder) aft_data = pd.DataFrame() aft_data.index.name = "Model" - aft_data.index = aft_dict.keys() + model_names = [ + x.replace("-", " ").replace("_", " ").title() for x in aft_dict.keys() + ] + aft_data.index = model_names aft_data["AIC"] = [ x.AIC_ if not isinstance(x, CoxPHFitter) else np.nan for x in aft_dict.values() ] - aft_data["Concordance for $x_t$"] = [ - x.concordance_index_ for x in aft_dict.values() - ] - aft_data["Concordance for $x_i$"] = [ - x.score(X_test, scoring_method="concordance_index") for x in aft_dict.values() - ] aft_data["BIC"] = [ x.AIC_ if not isinstance(x, CoxPHFitter) else np.nan for x in aft_dict.values() ] - aft_data["Mean $S(t;\\theta|x_{t})$"] = [ - x.predict_expectation(X_train).mean() for x in aft_dict.values() + 
aft_data["Concordance"] = [ + x.score(X_train, scoring_method="concordance_index") for x in aft_dict.values() ] - aft_data["Mean $S(t;\\theta|x_{i})$"] = [ - x.predict_expectation(X_test).mean() for x in aft_dict.values() + aft_data["Test Concordance"] = [ + x.score(X_test, scoring_method="concordance_index") for x in aft_dict.values() ] + aft_data["ICI"] = icis + aft_data["E50"] = e50s + pretty_dataset = ( + dataset.upper() + if dataset in ["combined", "Combined", "COMBINED"] + else dataset.upper() + ) aft_data = aft_data.round(2) aft_data.to_csv(folder / "aft_comparison.csv") - logger.info(f"Saved AFT comparison to {folder / 'aft_comparison.csv'}") + logger.info(f"Saved AFR comparison to {folder / 'aft_comparison.csv'}") aft_data = aft_data.round(2) aft_data.fillna("--", inplace=True) aft_data.to_latex( folder / "aft_comparison.tex", - float_format="%.2f", - label=f"tab:{dataset}", - caption=f"Comparison of AFR Models on the {dataset.upper()} dataset.", + float_format="%.3g", # Two decimal places, since we have 100 adversarial examples + label=f"tab:{dataset.lower()}", # Label for cross-referencing + caption=f"Comparison of AFR Models on the {pretty_dataset} dataset.", ) - + # Change to table* if span_columns is True + if span_columns is True: + with open(folder / "aft_comparison.tex", "r") as f: + tex_data = f.read() + tex_data = tex_data.replace( + r"\begin{table}", + r"\begin{table*}" + "\n" + r"\centering", + ) + tex_data = tex_data.replace(r"\end{table}", r"\end{table*}") + with open(folder / "aft_comparison.tex", "w") as f: + f.write(tex_data) return aft_data @@ -364,7 +511,7 @@ def split_data_for_aft( return X_train, X_test -def render_afr_plot( +def run_afr_experiment( mtype, config, X_train, @@ -380,59 +527,43 @@ def render_afr_plot( partial_effect_list = config.pop("partial_effect", []) model_config = config.pop("model", {}) aft = fit_aft( - summary_file=config.get("summary_file", f"{mtype}_summary.csv"), - summary_plot=config.get("summary_plot", f"{mtype}_summary.pdf"), + summary_file=plot_dict.get("summary_file", f"{mtype}_summary.csv"), + summary_plot=plot_dict.get("summary_plot", f"{mtype}_summary.pdf"), folder=folder, df=X_train, event_col=target, duration_col=duration_col, replacement_dict=label_dict, mtype=mtype, + summary_title=plot_dict.get("summary_title", f"{mtype} AFR Summary"), **model_config, ) afr_plot = plot_aft( aft=aft, - title=config.get( - "title", - f"{mtype} AFT".replace("_", " ").replace("-", " ").title(), + title=plot_dict.get( + "qq_title", + f"{mtype}".replace("_", " ").replace("-", " ").title() + " AFR", ), - file=config.get("file", f"{mtype}_aft.pdf"), - xlabel=label_dict.get("xlabel", "Coefficient"), - ylabel=label_dict.get("ylabel", r"$\mathbb{P}~(T>t)$"), # noqa W605 + file=plot_dict.get("plot", f"{mtype}_aft.pdf"), + xlabel=label_dict.get("xlabel", "Acceleration Factor"), + ylabel=label_dict.get("ylabel", ""), # noqa W605 replacement_dict=label_dict, folder=folder, ) plots.append(afr_plot) - if mtype == "cox": - logger.warning("Cox model does not have a CDF plot") - else: - if mtype == "weibull": - univariate_aft = WeibullFitter() - elif mtype == "log_normal": - univariate_aft = LogNormalFitter() - elif mtype == "log_logistic": - univariate_aft = LogLogisticFitter() - else: - raise ValueError(f"Model {mtype} not recognized") - if X_test is not None: - data = X_test - else: - data = X_train - univariate_aft.fit(data[duration_col], data[target]) - cdf_plot = plot_qq( - aft=univariate_aft, - title=config.get( - "title", - f"{mtype}".replace("_", " 
").replace("-", " ").title() - + " AFT QQ Plot", - ), - file=config.get("file", f"{mtype}_qq.pdf"), - xlabel=label_dict.get("xlabel", "Theoretical Quantiles"), - ylabel=label_dict.get("ylabel", "Empirical Quantiles"), - replacement_dict=label_dict, - folder=folder, - ) - plots.append(cdf_plot) + qq_plot, ici, e50 = plot_qq( + X_test=X_test, + aft=aft, + title=plot_dict.get( + "title", + f"{mtype}".replace("_", " ").replace("-", " ").title() + " AFR QQ Plot", + ), + file=plot_dict.get("qq_file", f"{mtype}_qq.pdf"), + xlabel=label_dict.get("xlabel", "Observed Quantiles"), + ylabel=label_dict.get("ylabel", "Predicted Quantiles"), + folder=folder, + ) + plots.append(qq_plot) for partial_effect_dict in partial_effect_list: file = partial_effect_dict.pop("file", "partial_effects.pdf") partial_effect_plot = plot_partial_effects( @@ -442,7 +573,7 @@ def render_afr_plot( folder=folder, ) plots.append(partial_effect_plot) - return aft, plots + return aft, plots, ici, e50 def render_all_afr_plots( @@ -451,7 +582,7 @@ def render_all_afr_plots( target, data, dataset, - test_size=0.8, + test_size=0.75, folder=".", ): covariate_list = config.pop("covariates", []) @@ -467,10 +598,12 @@ def render_all_afr_plots( ) plots = {} models = {} + icis = [] + e50s = [] mtypes = list(config.keys()) for mtype in mtypes: sub_config = config.get(mtype, {}) - models[mtype], plots[mtype] = render_afr_plot( + models[mtype], plots[mtype], ici, e50 = run_afr_experiment( mtype=mtype, config=sub_config, X_train=X_train, @@ -479,7 +612,18 @@ def render_all_afr_plots( duration_col=duration_col, folder=folder, ) - aft_data = make_afr_table(models, dataset, X_train, X_test, folder=folder) + icis.append(ici) + e50s.append(e50) + + aft_data = make_afr_table( + models, + dataset, + X_train, + X_test, + folder=folder, + icis=icis, + e50s=e50s, + ) print("*" * 80) print("*" * 34 + " RESULTS " + "*" * 34) print("*" * 80) diff --git a/examples/power/conf/combined_afr.yaml b/examples/power/conf/combined_afr.yaml index d7c15041..3209d7e4 100644 --- a/examples/power/conf/combined_afr.yaml +++ b/examples/power/conf/combined_afr.yaml @@ -23,11 +23,11 @@ weibull: "Intercept: lambda_": "$\\lambda$" "data.sample.random_state: lambda_": "Random State" "atk_value: lambda_": "Attack Strength" - "train_time: lambda_": "$t_{train}$" - "predict_proba_time: lambda_": "$t_{predict}$" + "train_time: lambda_": "$T_{t}$" + "predict_proba_time: lambda_": "$T_{i}$" "adv_accuracy: lambda_": "Adv. Accuracy" "accuracy: lambda_": "Ben. Accuracy" - "adv_fit_time: lambda_": "$t_{attack}$" + "adv_fit_time: lambda_": "$T_{a}$" "adv_failure_rate: lambda_": "$h_{adv.}(t;\\theta)$" "failure_rate: lambda_": "$h_{ben.}(t;\\theta)$" "model.trainer.nb_epoch: lambda_": "No. of Epochs" @@ -65,6 +65,15 @@ weibull: "title": "$\\varepsilon$", "labels": ["1e-4", "1e-3", "1e-2", '1e-1', '1'] } + - file : weibull_accuracy_partial_effect.pdf + covariate_array : accuracy + values_array : [0.5, .9, .99, .999] + title: "$S(t)$ for Weibull AFR" + ylabel: "$\\mathbb{P}~(T>t)$" + xlabel: "Time $t$ (seconds)" + legend_kwargs: + title: "Benign Accuracy" + labels: [".5", ".9", ".99", ".999"] # cox: # plot: # file : cox_aft.pdf @@ -72,11 +81,11 @@ weibull: # labels: # "data.sample.random_state": "Random State" # "atk_value": "Attack Strength" -# "train_time": "$t_{train}$" -# "predict_proba_time": "$t_{predict}$" +# "train_time": "$T_{t}$" +# "predict_proba_time": "$T_{i}$" # "adv_accuracy": "Adv. Accuracy" # "accuracy": "Ben. 
Accuracy" -# "adv_fit_time": "$t_{attack}$" +# "adv_fit_time": "$T_{a}$" # "adv_failure_rate": "$h_{adv.}(t;\\theta)$" # "failure_rate": "$h_{ben.}(t;\\theta)$" # "model.trainer.nb_epoch": "No. of Epochs" @@ -123,11 +132,11 @@ log_logistic: "Intercept: alpha_": "$\\alpha$" "data.sample.random_state: alpha_": "Random State" "atk_value: alpha_": "Attack Strength" - "train_time: alpha_": "$t_{train}$" - "predict_proba_time: alpha_": "$t_{predict}$" + "train_time: alpha_": "$T_{t}$" + "predict_proba_time: alpha_": "$T_{i}$" "adv_accuracy: alpha_": "Adv. Accuracy" "accuracy: alpha_": "Ben. Accuracy" - "adv_fit_time: alpha_": "$t_{attack}$" + "adv_fit_time: alpha_": "$T_{a}$" "adv_failure_rate: alpha_": "$h_{adv.}(t;\\theta)$" "failure_rate: alpha_": "$h_{ben.}(t;\\theta)$" "model.trainer.nb_epoch: alpha_": "No. of Epochs" @@ -165,6 +174,15 @@ log_logistic: "title": "$\\varepsilon$", "labels": ["1e-4", "1e-3", "1e-2", '1e-1', '1'] } + - file : log_logistic_accuracy_partial_effect.pdf + covariate_array : accuracy + values_array : [0.5, .9, .99, .999] + title: "$S(t)$ for Log-Logistic AFR" + ylabel: "$\\mathbb{P}~(T>t)$" + xlabel: "Time $t$ (seconds)" + legend_kwargs: + title: "Benign Accuracy" + labels: [".5", ".9", ".99", ".999"] log_normal: plot: file : log_normal_aft.pdf @@ -173,11 +191,11 @@ log_normal: "Intercept: sigma_": "$\\sigma$" "Intercept: mu_": "$\\mu$" "atk_value: mu_": "Attack Strength" - "train_time: mu_": "$t_{train}$" - "predict_proba_time: mu_": "$t_{predict}$" + "train_time: mu_": "$T_{t}$" + "predict_proba_time: mu_": "$T_{i}$" "adv_accuracy: mu_": "Adv. Accuracy" "accuracy: mu_": "Ben. Accuracy" - "adv_fit_time: mu_": "$t_{attack}$" + "adv_fit_time: mu_": "$T_{a}$" "adv_failure_rate: mu_": "$h_{adv.}(t;\\theta)$" "failure_rate: mu_": "$h_{ben.}(t;\\theta)$" "model.trainer.nb_epoch: mu_": "No. 
of Epochs" @@ -216,3 +234,12 @@ log_normal: "title": "$\\varepsilon$", "labels": ["1e-4", "1e-3", "1e-2", '1e-1', '1'] } + - file : log_normal_accuracy_partial_effect.pdf + covariate_array : accuracy + values_array : [0.5, .9, .99, .999] + title: "$S(t)$ for Log-Normal AFR" + ylabel: "$\\mathbb{P}~(T>t)$" + xlabel: "Time $t$ (seconds)" + legend_kwargs: + title: "Benign Accuracy" + labels: [".5", ".9", ".99", ".999"] diff --git a/examples/power/plots/combined_plots.py b/examples/power/plots/combined_plots.py index c0bec030..97f7f6ff 100644 --- a/examples/power/plots/combined_plots.py +++ b/examples/power/plots/combined_plots.py @@ -7,7 +7,7 @@ set_matplotlib_vars() -sns.set_theme(style="whitegrid", font_scale=1.8, font="times new roman") +sns.set_theme(style="whitegrid", font_scale=1.2, font="times new roman") normal_dir = "data" datasets = ["mnist", "cifar", "cifar100"] @@ -32,21 +32,6 @@ extra_df["dataset"] = data big_df = pd.concat([big_df, extra_df], axis=0) -# Normalize the times by sample size -ben_train_samples = pd.Series(big_df["train_time"] / big_df["train_time_per_sample"]) -ben_pred_samples = pd.Series(big_df["predict_time"] / big_df["predict_time_per_sample"]) -adv_pred_samples = pd.Series( - big_df["adv_predict_time"] / big_df["adv_predict_time_per_sample"], -) -big_df = big_df.assign(ben_pred_samples=ben_pred_samples.values) -big_df = big_df.assign(adv_pred_samples=adv_pred_samples.values) -big_df = big_df.assign(ben_train_samples=ben_train_samples.values) -big_df["train_time"] = big_df["train_time"] / big_df["ben_train_samples"] -big_df["predict_time"] = big_df["predict_time"] / (big_df["ben_pred_samples"]) -big_df["adv_fit_time"] = big_df["adv_fit_time"] / big_df["adv_pred_samples"] -big_df["train_power"] = big_df["train_power"] / big_df["ben_train_samples"] -big_df["predict_power"] = big_df["predict_power"] / big_df["ben_pred_samples"] -big_df["adv_fit_power"] = big_df["adv_fit_power"] / big_df["adv_pred_samples"] # Device Metadata memory_bandwith = { @@ -88,16 +73,27 @@ big_df["train_cost"] = big_df["train_time"] * big_df["cost"] big_df["predict_cost"] = big_df["predict_time"] * big_df["cost"] big_df["adv_fit_cost"] = big_df["adv_fit_time"] * big_df["cost"] -for dataset in big_df.dataset.unique(): - big_df.loc[big_df.dataset == dataset, "n_pixels"] = int( - dataset_resolution[dataset] ** 2, - ) - big_df.loc[big_df.dataset == dataset, "n_channels"] = int(dataset_channels[dataset]) - big_df.loc[big_df.dataset == dataset, "n_classes"] = int(dataset_classes[dataset]) -big_df.loc[:, "memory_per_batch"] = ( - big_df[batch_size] * big_df[resolution] * big_df[resolution] * big_df[bit_depth] / 8 -).values +ben_train_samples = 48000 +ben_pred_samples = 12000 +adv_pred_samples = big_df["attack.attack_size"].values +train_cost_per_sample = big_df["train_cost"] / ben_train_samples +predict_cost_per_sample = big_df["predict_cost"] / ben_pred_samples +adv_fit_cost_per_sample = big_df["adv_fit_cost"] / adv_pred_samples +train_power_per_sample = big_df["train_power"] / ben_train_samples +predict_power_per_sample = big_df["predict_power"] / ben_pred_samples +adv_fit_power_per_sample = big_df["adv_fit_power"] / adv_pred_samples +big_df = big_df.reset_index(drop=True) +big_df["train_cost_per_sample"] = train_cost_per_sample.values +big_df["predict_cost_per_sample"] = predict_cost_per_sample.values +big_df["adv_fit_cost_per_sample"] = adv_fit_cost_per_sample.values +big_df["train_power_per_sample"] = train_power_per_sample.values +big_df["predict_power_per_sample"] = 
predict_power_per_sample.values +big_df["adv_fit_power_per_sample"] = adv_fit_power_per_sample.values +big_df["train_time_per_sample"] = big_df["train_time"] / ben_train_samples +big_df["predict_time_per_sample"] = big_df["predict_time"] / ben_pred_samples +big_df["adv_fit_time_per_sample"] = big_df["adv_fit_time"] / adv_pred_samples + big_df["Device"] = big_df["device_id"].str.replace("-", " ").str.title() big_df = big_df.reset_index(drop=True) Path("data/combined").mkdir(parents=True, exist_ok=True) @@ -105,8 +101,12 @@ big_df.to_csv("data/combined/combined.csv") big_df = pd.read_csv("data/combined/combined.csv", index_col=0, low_memory=False) +# Capitalize all letters in the dataset +big_df["dataset"] = big_df["dataset"].str.upper() +# Split the device_id on the last word and only keep the last word +big_df["Device"] = big_df["device_id"].str.split("-").str[-1].str.upper() # Accuracy Plot -fig, ax = plt.subplots(1, 2, figsize=(5, 5)) +fig, ax = plt.subplots(1, 2, figsize=(8, 3)) ben_acc = sns.boxenplot( data=big_df, x="dataset", @@ -116,7 +116,9 @@ ) ben_acc.set_title("") ben_acc.set_ylabel("Ben. Accuracy") -ben_acc.set_xlabel("Dataset") +ben_acc.set_xlabel("") +ben_acc.tick_params(axis="x", labelsize=12, rotation=0) +ben_acc.set_yscale("linear") ben_acc.legend().remove() adv_acc = sns.boxenplot( data=big_df, @@ -127,123 +129,139 @@ ) adv_acc.set_title("") adv_acc.set_ylabel("Adv. Accuracy") -adv_acc.set_xlabel("Dataset") -adv_acc.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.0) -for _, ax in enumerate(fig.axes): - ax.set_xticklabels(ax.get_xticklabels(), rotation=90) +adv_acc.set_yscale("linear") +adv_acc.set_xlabel("") +adv_acc.tick_params(axis="x", labelsize=12, rotation=0) +xticklabels = [item.get_text() for item in adv_acc.get_xticklabels()] +adv_acc.legend() +# for _, ax in enumerate(fig.axes): +# ax.set_xticklabels(ax.get_xticklabels(), rotation=90) fig.tight_layout() fig.savefig("combined/acc.pdf") + +sns.set_theme(style="whitegrid", font_scale=1.8, font="times new roman") + + # Time Plot -fig, ax = plt.subplots(1, 3, figsize=(16, 5)) +fig, ax = plt.subplots(1, 3, figsize=(17, 5)) train_time = sns.boxenplot( data=big_df, x="dataset", - y="train_time", + y="train_time_per_sample", hue="Device", ax=ax[0], ) train_time.set_title("") train_time.set_ylabel("$t_{t}$ (seconds)") -train_time.set_xlabel("Dataset") +train_time.set_xlabel("") +train_time.tick_params(axis="x", labelsize=18) train_time.legend().remove() predict_time = sns.boxenplot( data=big_df, x="dataset", - y="predict_time", + y="predict_time_per_sample", hue="Device", ax=ax[1], ) predict_time.set_title("") predict_time.set_ylabel("$t_{i}$ (seconds)") -predict_time.set_xlabel("Dataset") +predict_time.set_xlabel("") +predict_time.tick_params(axis="x", labelsize=18) predict_time.legend().remove() adv_fit_time = sns.boxenplot( data=big_df, x="dataset", - y="adv_fit_time", + y="adv_fit_time_per_sample", hue="Device", ax=ax[2], ) adv_fit_time.set_title("") adv_fit_time.set_ylabel("$t_{a}$ (seconds)") -adv_fit_time.set_xlabel("Dataset") -adv_fit_time.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.0) +adv_fit_time.set_xlabel("") +adv_fit_time.tick_params(axis="x", labelsize=18) +adv_fit_time.legend() fig.tight_layout() fig.savefig("combined/time.pdf") # Power Plot -fig, ax = plt.subplots(1, 3, figsize=(18, 5)) -train_time = sns.boxenplot( +fig, ax = plt.subplots(1, 3, figsize=(17, 5)) +train_power = sns.boxenplot( data=big_df, x="dataset", - y="train_power", + y="train_power_per_sample", hue="Device", 
ax=ax[0], ) -train_time.set_title("") -train_time.set_ylabel("$P_{t}$ (Watts)") -train_time.set_xlabel("Dataset") -train_time.legend().remove() -predict_time = sns.boxenplot( +train_power.set_title("") +train_power.set_ylabel("$P_{t}$ (Watts)") +train_power.set_xlabel("") +train_power.tick_params(axis="x", labelsize=18) +train_power.legend().remove() +predict_power = sns.boxenplot( data=big_df, x="dataset", - y="predict_power", + y="predict_power_per_sample", hue="Device", ax=ax[1], ) -predict_time.set_title("") -predict_time.set_ylabel("$P_{i}$ (Watts)") -predict_time.set_xlabel("Dataset") -predict_time.legend().remove() -adv_fit_time = sns.boxenplot( +predict_power.set_title("") +predict_power.set_ylabel("$P_{i}$ (Watts)") +predict_power.set_xlabel("") +predict_power.tick_params(axis="x", labelsize=18) +predict_power.legend().remove() +adv_fit_power = sns.boxenplot( data=big_df, x="dataset", - y="adv_fit_power", + y="adv_fit_power_per_sample", hue="Device", ax=ax[2], ) -adv_fit_time.set_title("") -adv_fit_time.set_ylabel("$P_{a}$ (Watts)") -adv_fit_time.set_xlabel("Dataset") -adv_fit_time.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.0) +adv_fit_power.set_title("") +adv_fit_power.set_ylabel("$P_{a}$ (Watts)") +adv_fit_power.set_xlabel("") +adv_fit_power.tick_params(axis="x", labelsize=18) +adv_fit_power.legend() fig.tight_layout() fig.savefig("combined/power.pdf") # Cost Plot -fig, ax = plt.subplots(1, 3, figsize=(18, 5)) +fig, ax = plt.subplots(1, 3, figsize=(17, 5)) train_cost = sns.boxenplot( data=big_df, x="dataset", - y="train_cost", + y="train_cost_per_sample", hue="Device", ax=ax[0], ) train_cost.set_title("") train_cost.set_ylabel("$C_{t}$ (USD)") -train_cost.set_xlabel("Dataset") +train_cost.set_xlabel("") +train_cost.tick_params(axis="x", labelsize=18) train_cost.legend().remove() predict_cost = sns.boxenplot( data=big_df, x="dataset", - y="predict_cost", + y="predict_cost_per_sample", hue="Device", ax=ax[1], ) predict_cost.set_title("") predict_cost.set_ylabel("$C_{i}$ (USD)") -predict_cost.set_xlabel("Dataset") +predict_cost.set_xlabel("") +predict_cost.tick_params(axis="x", labelsize=18) predict_cost.legend().remove() adv_fit_cost = sns.boxenplot( data=big_df, x="dataset", - y="adv_fit_cost", + y="adv_fit_cost_per_sample", hue="Device", ax=ax[2], ) adv_fit_cost.set_title("") adv_fit_cost.set_ylabel("$C_{a}$ (USD)") -adv_fit_cost.set_xlabel("Dataset") -adv_fit_cost.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.0) +adv_fit_cost.set_xlabel("") +adv_fit_cost.tick_params(axis="x", labelsize=18) +adv_fit_cost.legend() fig.tight_layout() fig.savefig("combined/cost.pdf") diff --git a/examples/power/plots/dvc.lock b/examples/power/plots/dvc.lock index ca70d59e..c75ee7ac 100644 --- a/examples/power/plots/dvc.lock +++ b/examples/power/plots/dvc.lock @@ -88,44 +88,44 @@ stages: outs: - path: cifar100/aft_comparison.csv hash: md5 - md5: 2257bff05cc3b1fbb36f3a73ccb90b78 - size: 271 + md5: b7c15ae615a33408a0b727601cab890a + size: 212 - path: cifar100/aft_comparison.tex hash: md5 - md5: d6648c4e2c632ab5b54354d8ea61b8a4 - size: 530 + md5: 3466f7e3a4024d83bbaa2ccc7b13da80 + size: 472 - path: cifar100/cox_aft.pdf hash: md5 - md5: 9a20be4d8a7ba0972595f38a67e60235 - size: 32152 + md5: 8d4a1219efea2157e2dbd6c34f69c6bb + size: 28926 - path: cifar100/cox_epochs_partial_effect.pdf hash: md5 - md5: f9fcbdf407d7c9d56a2454692fdae1bd + md5: 6e8c834ceef716c83fb2636b2e5e2fb5 size: 33262 - path: cifar100/log_logistic_aft.pdf hash: md5 - md5: ef7565193224d37495079be860534763 - size: 
34138 + md5: c2831702b2c06647e434910fd396c72c + size: 29307 - path: cifar100/log_logistic_epochs_partial_effect.pdf hash: md5 - md5: be20812c13584796444702d1af43c8af - size: 27110 + md5: e0210ef32a6a57ed272b59a810a7c240 + size: 27155 - path: cifar100/log_normal_aft.pdf hash: md5 - md5: 3cc4acbfe69718aa196532d90834e313 - size: 36607 + md5: 2313759f670761c972d56b1ab44447de + size: 29841 - path: cifar100/log_normal_epochs_partial_effect.pdf hash: md5 - md5: 929320c0cf4e235a44afd8383f55a3f9 - size: 27990 + md5: fe1c01462a182aec383408ff0082e7a7 + size: 27993 - path: cifar100/weibull_aft.pdf hash: md5 - md5: d8618fb387e7c85479b19548e6220e99 - size: 34233 + md5: 6feac2da8227c48d81b4f50619f03518 + size: 29018 - path: cifar100/weibull_epochs_partial_effect.pdf hash: md5 - md5: 12652d663bc6404d0baaabc85f7f0871 - size: 27982 + md5: 8447c8226062b6498c1df56a2f94f41e + size: 28027 merge@cifar: cmd: python merge.py --big_dir data/bit_depth/cifar --little_dir data/cifar --config ../conf/afr.yaml --data_file power.csv --output_folder cifar/ --output_file @@ -242,44 +242,44 @@ stages: outs: - path: mnist/aft_comparison.csv hash: md5 - md5: ad80e3681729313ec97819530d1cdd42 - size: 269 + md5: 83bac6dcedac880fa889c4eb997ef11b + size: 207 - path: mnist/aft_comparison.tex hash: md5 - md5: a07cff330b81c54a7f0013938df8a65e - size: 524 + md5: 030489512539372044c8f958ce6bab22 + size: 467 - path: mnist/cox_aft.pdf hash: md5 - md5: 920c1e03e024d507c659ad515d941406 - size: 31798 + md5: d7785b4b4e405064c88213aec92d1644 + size: 28581 - path: mnist/cox_epochs_partial_effect.pdf hash: md5 - md5: 879987df1d9576fd1e7e64075c759199 + md5: a5eb4c53909f58fd79fa72cd76de3407 size: 32663 - path: mnist/log_logistic_aft.pdf hash: md5 - md5: a6a42df85fb2740525cdd24f90d1d958 - size: 34388 + md5: 915eb55ca3d44b0c97fc491d9f797425 + size: 29342 - path: mnist/log_logistic_epochs_partial_effect.pdf hash: md5 - md5: e7a1024bf5711b1bb6894f8b02a095ad - size: 27213 + md5: 6fed782e1b3acc31c925ad6747b50068 + size: 27325 - path: mnist/log_normal_aft.pdf hash: md5 - md5: af07ade8cc413c777936d7fe859f4a68 - size: 36004 + md5: 3e78554ecd7648c6abd66d5c073359a1 + size: 29874 - path: mnist/log_normal_epochs_partial_effect.pdf hash: md5 - md5: 9705991df4b2241a1bd4147d8ce1217a - size: 28114 + md5: c3e612dafecb517a3211549982730ab3 + size: 28077 - path: mnist/weibull_aft.pdf hash: md5 - md5: 89cf11db7c661a1d22b3f62e9f072481 - size: 33870 + md5: f3011ee5be6f1854bbe3e75502f89ad0 + size: 29062 - path: mnist/weibull_epochs_partial_effect.pdf hash: md5 - md5: 0aa3d0c8b7a5e4f383d27a812d088111 - size: 26912 + md5: 8aadd28ac46599dee591eaaa162f364d + size: 26910 clean@cifar: cmd: python -m deckard.layers.clean_data --i cifar/merged.csv --o cifar/clean.csv --config "../conf/clean.yaml" @@ -340,84 +340,84 @@ stages: outs: - path: cifar/aft_comparison.csv hash: md5 - md5: 2227175517da80ba886a0074154ed1b9 - size: 270 + md5: 42dad9a57c0b74eee3caffbd85e177d0 + size: 211 - path: cifar/aft_comparison.tex hash: md5 - md5: 9722d9549efabb9cca237c5b4b98a440 - size: 524 + md5: 112fb42d06cc47aee03cf203865133bf + size: 471 - path: cifar/cox_aft.pdf hash: md5 - md5: d5edfeb0291af42ff2fd3cfdfb664554 - size: 32165 + md5: 8a3e6c71760bc98fe64687f518ba0654 + size: 28935 - path: cifar/cox_epochs_partial_effect.pdf hash: md5 - md5: 4cdc932bc4d01b83bb38bbc71968bac1 + md5: c9ad760d1e59f90218ca4a7592bcd309 size: 33196 - path: cifar/log_logistic_aft.pdf hash: md5 - md5: 088e71029a7cff65cf3ceb6c263952d8 - size: 34161 + md5: af4360e69eb314e5717407a543c9cd37 + size: 29703 - path: 
cifar/log_logistic_epochs_partial_effect.pdf hash: md5 - md5: 98de225c30a610cd81cd6fe96f25b554 - size: 27406 + md5: 17fa89d624dd206070dd6669254e287f + size: 27433 - path: cifar/log_normal_aft.pdf hash: md5 - md5: b31b503b98648af6342a68e329fdca29 - size: 36603 + md5: f7aebd3a3cc48ef9f4c04279af54b2bc + size: 30232 - path: cifar/log_normal_epochs_partial_effect.pdf hash: md5 - md5: b41fe0dbadb14fc98ee4dcb20a43b4a7 - size: 28108 + md5: f2d95584d261226817a894b895d276e7 + size: 28162 - path: cifar/weibull_aft.pdf hash: md5 - md5: 155df863028296856bd080794542f77a - size: 33881 + md5: 9c45ad78f653dcdfff593545c5c8921b + size: 28724 - path: cifar/weibull_epochs_partial_effect.pdf hash: md5 - md5: de43e7081c5b6bc34407132fba234551 - size: 26982 + md5: 8aa46a3fec32679b6a2e29fea439bb43 + size: 28030 combined_plots: cmd: python combined_plots.py deps: - path: cifar/aft_comparison.csv hash: md5 - md5: 2227175517da80ba886a0074154ed1b9 - size: 270 + md5: 42dad9a57c0b74eee3caffbd85e177d0 + size: 211 - path: cifar100/aft_comparison.csv hash: md5 - md5: 2257bff05cc3b1fbb36f3a73ccb90b78 - size: 271 + md5: b7c15ae615a33408a0b727601cab890a + size: 212 - path: combined_plots.py hash: md5 - md5: caec96ccfbae3aaec2f390c7fd959e88 - size: 7151 + md5: 7ea9df483b87b41bbe1195b55275bd6c + size: 7724 - path: mnist/aft_comparison.csv hash: md5 - md5: ad80e3681729313ec97819530d1cdd42 - size: 269 + md5: 83bac6dcedac880fa889c4eb997ef11b + size: 207 outs: - path: combined/acc.pdf hash: md5 - md5: 37fdbe264a3b8e614613b262d81530e1 - size: 34267 + md5: c379bcfd7e96effb55ac149fd16e1efe + size: 30390 - path: combined/cost.pdf hash: md5 - md5: 1a86f9967e23e37efb3d26f8c75c13cd - size: 45578 + md5: cab8446b120746a4a9d8a54fcafa2f58 + size: 39900 - path: combined/power.pdf hash: md5 - md5: f12c54cd9910c8fbce4bb88df20272e5 - size: 44335 + md5: 5034f0a136246c06c9fcb48e6f58c94b + size: 40090 - path: combined/time.pdf hash: md5 - md5: 225f6dab8cde97a6dab310da0baee421 - size: 45499 + md5: 71b708cc731f911d999db3c5d3fc977a + size: 40222 - path: data/combined/combined.csv hash: md5 - md5: d22720f16c4bc98dc65808923b99c642 - size: 52360413 + md5: 5373a0cba09196ebf00bbfbab9378398 + size: 52639498 combined_clean: cmd: python -m deckard.layers.clean_data --i data/combined/combined.csv --o combined/clean.csv --config "../conf/clean.yaml" @@ -428,8 +428,8 @@ stages: size: 513 - path: data/combined/combined.csv hash: md5 - md5: d22720f16c4bc98dc65808923b99c642 - size: 52360413 + md5: 5373a0cba09196ebf00bbfbab9378398 + size: 52639498 params: ../conf/clean.yaml: attacks: @@ -460,8 +460,8 @@ stages: outs: - path: combined/clean.csv hash: md5 - md5: 9c8adccd19445c4a933a3b3d8892ff2e - size: 37144640 + md5: 24f49787a6d20f9172dacf9edefff85f + size: 37298176 combined_afr: cmd: python -m deckard.layers.afr --dataset Combined --data_file combined/clean.csv --config_file "../conf/combined_afr.yaml" --plots_folder combined/ --target @@ -469,138 +469,146 @@ stages: deps: - path: ../conf/combined_afr.yaml hash: md5 - md5: 589c50684ec03d7a423a6668943b8540 - size: 7989 + md5: 068c842adac014ff65cbf74d28e57263 + size: 8923 - path: combined/clean.csv hash: md5 - md5: 9c8adccd19445c4a933a3b3d8892ff2e - size: 37144640 + md5: 24f49787a6d20f9172dacf9edefff85f + size: 37298176 outs: - path: combined/aft_comparison.csv hash: md5 - md5: 3a0019b14a09be37adbd259de9dbad16 - size: 247 + md5: 238ff959f5768d165bddb38595db5d37 + size: 188 - path: combined/aft_comparison.tex hash: md5 - md5: feb86ff3f9329c0b3b7c058ef7302a14 - size: 491 + md5: 14bc20405c6aadb2dfc4e421cd38ca10 + size: 435 
- path: combined/log_logistic_aft.pdf hash: md5 - md5: c9db22681bf3745778ea843d719721b0 - size: 33880 + md5: d7b9b3f4d713ff30a3ac12338b548a74 + size: 26842 - path: combined/log_logistic_attack_eps_partial_effect.pdf hash: md5 - md5: cdb0049ea99584d23c278dd8592bd989 - size: 27181 + md5: 785a8d08bde202772f2127ee7d8c33de + size: 26868 - path: combined/log_logistic_batch_size_partial_effect.pdf hash: md5 - md5: 65e6391d402d097451047377b90adbeb - size: 29102 + md5: 4facc4ccdd837b805a1e01d5bbaeaa97 + size: 28790 - path: combined/log_logistic_epochs_partial_effect.pdf hash: md5 - md5: 8b25821be8ab1238fd0df4e3ffc04c9d - size: 29857 + md5: 720b3775b9aacafde7db37012ceb54ff + size: 27938 - path: combined/log_logistic_qq.pdf hash: md5 - md5: 1cb4bc801e30781159a6257ae6fc85b1 - size: 80192 + md5: b3961143f38c1f19fe26aa05f2cb3ca4 + size: 18763 - path: combined/log_logistic_summary.csv hash: md5 - md5: 6989c7b4f0a7c8ca3a7439e741102ca2 - size: 1632 + md5: c0cbc323a723516dbd3087bb9440caf1 + size: 3486 - path: combined/log_logistic_summary.pdf hash: md5 - md5: 3dc8c0d448bc0e36c74c1a1f5cbc7bd5 - size: 35524 + md5: 283e08bfbb7f67667dd5126358f3d160 + size: 29010 - path: combined/log_normal_aft.pdf hash: md5 - md5: c760153142ede9a15425be58b880a684 - size: 35466 + md5: ae6452dd66e6b5c2c4d493d2f2e2de27 + size: 27359 - path: combined/log_normal_attack_eps_partial_effect.pdf hash: md5 - md5: 82274edbc7615827300fda871dccaf1e - size: 27961 + md5: 982eb06fd39148ec8a540528b4a7886a + size: 27579 - path: combined/log_normal_batch_size_partial_effect.pdf hash: md5 - md5: e5b704ae750e4eb6a953605a8281725b - size: 29838 + md5: af939d5bf9a83e5e606c5f09aaa481ba + size: 29598 - path: combined/log_normal_epochs_partial_effect.pdf hash: md5 - md5: aa0d6c3febb789ca68641151b3133032 - size: 29034 + md5: b3dd6798f6377a13ac425e53cf34bea8 + size: 28603 - path: combined/log_normal_qq.pdf hash: md5 - md5: 31c7e37d8cb51050080abfb5cd291882 - size: 80638 + md5: 202f50d80f2be2a0a1dfafa691b99eb2 + size: 19906 - path: combined/log_normal_summary.csv hash: md5 - md5: f0744e8379bac0a0216e96901095f342 - size: 3113 + md5: 9025cfe2f47a1a1aa0d9fb3575072712 + size: 3445 - path: combined/log_normal_summary.pdf hash: md5 - md5: b5466fe064ffa39f6159f120bf792484 - size: 35189 + md5: af4ebf73e8d223c5fc6dcd274ff79b49 + size: 27645 - path: combined/weibull_aft.pdf hash: md5 - md5: 7880981ca4a8b19e06604751251a2968 - size: 33784 + md5: 099c99556c6c16cd21714f6ea96790d9 + size: 26881 - path: combined/weibull_attack_eps_partial_effect.pdf hash: md5 - md5: a6cad8a16922d9bbb052be3196d70b03 - size: 26877 + md5: cd9bdcb36c0012ac355a1ef9bf7260c8 + size: 26258 - path: combined/weibull_batch_size_partial_effect.pdf hash: md5 - md5: 5d08cea34b926292ffce4db22c080eb2 - size: 29041 + md5: b98d5c459d73e65d994b2a23db1f71ec + size: 28543 - path: combined/weibull_epochs_partial_effect.pdf hash: md5 - md5: 4d01fcfc26dc51be5880e69ccc8b9358 - size: 28314 + md5: 8a34f173079fa7f7f5667d84d77d2768 + size: 27623 - path: combined/weibull_qq.pdf hash: md5 - md5: 6dea1bd31c5d640dd116f82e667f24da - size: 79726 + md5: 5dae104737074ab926ef947938254eda + size: 18121 - path: combined/weibull_summary.csv hash: md5 - md5: 546350a82b4ec0551907bfff1ef2a0b3 - size: 2811 + md5: 60e0589a5a15d42dff93363a7eb4321d + size: 3496 - path: combined/weibull_summary.pdf hash: md5 - md5: 50e1bd836cf02de0b737b7d172f93d8e - size: 35567 + md5: c9a8302a6c526187170c7f2c3a0e2db8 + size: 27676 copy@mnist: + cmd: rm -rf ~/kepler-ml/plots/mnist/ && mkdir -p ~/kepler-ml/plots/mnist/ && cp + -r mnist/* 
~/kepler-ml/plots/mnist/ cmd: rm -rf ~/kepler-ml/plots/mnist/ && mkdir -p ~/kepler-ml/plots/mnist/ && cp -r mnist/* ~/kepler-ml/plots/mnist/ deps: - path: mnist/ hash: md5 - md5: 0bb4ea08e80e765597832dc64485e118.dir - size: 25194927 - nfiles: 23 + md5: a31729c195d8a0130c0235f50240f8be.dir + size: 25145164 + nfiles: 24 copy@cifar: + cmd: rm -rf ~/kepler-ml/plots/cifar/ && mkdir -p ~/kepler-ml/plots/cifar/ && cp + -r cifar/* ~/kepler-ml/plots/cifar/ cmd: rm -rf ~/kepler-ml/plots/cifar/ && mkdir -p ~/kepler-ml/plots/cifar/ && cp -r cifar/* ~/kepler-ml/plots/cifar/ deps: - path: cifar/ hash: md5 - md5: d10b2f2706a58f75667b4ac487b1c8a1.dir - size: 25102765 - nfiles: 23 + md5: b407c30ea9d76581e3e6c2aba9c72c7d.dir + size: 25054465 + nfiles: 24 copy@cifar100: + cmd: rm -rf ~/kepler-ml/plots/cifar100/ && mkdir -p ~/kepler-ml/plots/cifar100/ + && cp -r cifar100/* ~/kepler-ml/plots/cifar100/ cmd: rm -rf ~/kepler-ml/plots/cifar100/ && mkdir -p ~/kepler-ml/plots/cifar100/ && cp -r cifar100/* ~/kepler-ml/plots/cifar100/ deps: - path: cifar100/ hash: md5 - md5: 836fbb072b87eba8ee7a4ab66aa0ef15.dir - size: 29910294 - nfiles: 23 + md5: 75185a117f2df768d81bf7005ad94a9f.dir + size: 29860670 + nfiles: 24 copy@combined: + cmd: rm -rf ~/kepler-ml/plots/combined/ && mkdir -p ~/kepler-ml/plots/combined/ + && cp -r combined/* ~/kepler-ml/plots/combined/ cmd: rm -rf ~/kepler-ml/plots/combined/ && mkdir -p ~/kepler-ml/plots/combined/ && cp -r combined/* ~/kepler-ml/plots/combined/ deps: - path: combined/ hash: md5 - md5: 3cd1bd6ec5475c44d40a394599d0f823.dir - size: 38119666 - nfiles: 32 + md5: fadbd9a0be85c67c2d8d384d201be0b8.dir + size: 38115902 + nfiles: 35